From 149e9b35365d94a6564942d42af996af487ea2a7 Mon Sep 17 00:00:00 2001
From: Simon Pirkelmann
Date: Wed, 14 Oct 2020 21:15:43 +0200
Subject: [PATCH] coupled robot control with new position estimator

---
 remote_control/opencv_viewer_example.py | 280 ++++++++++++++++++++++++
 remote_control/roborally.py             | 179 ++++++-----------
 2 files changed, 321 insertions(+), 138 deletions(-)
 create mode 100644 remote_control/opencv_viewer_example.py

diff --git a/remote_control/opencv_viewer_example.py b/remote_control/opencv_viewer_example.py
new file mode 100644
index 0000000..09a07c7
--- /dev/null
+++ b/remote_control/opencv_viewer_example.py
@@ -0,0 +1,280 @@
+## License: Apache 2.0. See LICENSE file in root directory.
+## Copyright(c) 2015-2017 Intel Corporation. All Rights Reserved.
+
+###############################################
+##      Open CV and Numpy integration        ##
+###############################################
+
+import pyrealsense2 as rs
+import numpy as np
+import cv2
+from cv2 import aruco
+from shapely.geometry import LineString
+
+DS5_product_ids = ["0AD1", "0AD2", "0AD3", "0AD4", "0AD5", "0AF6", "0AFE", "0AFF",
+                   "0B00", "0B01", "0B03", "0B07", "0B3A"]
+
+
+def find_device_that_supports_advanced_mode():
+    ctx = rs.context()
+    devices = ctx.query_devices()
+    for dev in devices:
+        if dev.supports(rs.camera_info.product_id) and str(dev.get_info(rs.camera_info.product_id)) in DS5_product_ids:
+            if dev.supports(rs.camera_info.name):
+                print("Found device that supports advanced mode:", dev.get_info(rs.camera_info.name))
+            return dev
+    raise Exception("No device that supports advanced mode was found")
+
+
+class ArucoEstimator:
+    grid_columns = 10
+    grid_rows = 8
+    corner_marker_ids = {
+        'a': 0,
+        'b': 1,
+        'c': 2,
+        'd': 3
+    }
+
+    robot_marker_ids = [12]
+
+    robot_marker_estimates = dict([(id, None) for id in robot_marker_ids])
+    angles = []
+
+    corner_estimates = {
+        'a': (None, 0),  # (estimate, n_estimates)
+        'b': (None, 0),
+        'c': (None, 0),
+        'd': (None, 0)
+    }
+
+    def __init__(self, use_realsense=True):
+        if use_realsense:
+            # configure the color stream of the RealSense camera
+            self.pipeline = rs.pipeline()
+            config = rs.config()
+            # config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
+            # config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)
+            config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
+            # config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
+
+            # start streaming
+            self.pipeline.start(config)
+
+            # build the pinhole camera matrix from the stream intrinsics
+            # more info: https://dev.intelrealsense.com/docs/projection-in-intel-realsense-sdk-20
+            camera_intrinsics = self.pipeline.get_active_profile().get_stream(
+                rs.stream.color).as_video_stream_profile().get_intrinsics()
+            self.camera_matrix = np.zeros((3, 3))
+            self.camera_matrix[0][0] = camera_intrinsics.fx
+            self.camera_matrix[1][1] = camera_intrinsics.fy
+            self.camera_matrix[0][2] = camera_intrinsics.ppx
+            self.camera_matrix[1][2] = camera_intrinsics.ppy
+            self.camera_matrix[2][2] = 1.0  # required for a valid intrinsic matrix
+            self.dist_coeffs = np.array(camera_intrinsics.coeffs)
+        else:
+            # fall back to the default OpenCV camera
+            self.cv_camera = cv2.VideoCapture(0)
+            self.pipeline = None
+
+    def run_tracking(self):
+        try:
+            while True:
+                if self.pipeline:
+                    frames = self.pipeline.wait_for_frames()
+                    color_frame = frames.get_color_frame()
+                    if not color_frame:
+                        continue
+                    # convert image to numpy array
+                    color_image = np.asanyarray(color_frame.get_data())
+                else:
+                    # capture frame-by-frame
+                    ret, color_image = self.cv_camera.read()
+
+                gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
+
+                # aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_250)
+                aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)
+                parameters = aruco.DetectorParameters_create()
+                corners, ids, rejected_img_points = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
+                frame = aruco.drawDetectedMarkers(color_image.copy(), corners, ids)
+
+                if ids is not None:
+                    for id, c in zip(ids, corners):
+                        res = aruco.estimatePoseSingleMarkers(c, 0.10, self.camera_matrix, self.dist_coeffs)
+                        rvecs = res[0]
+                        tvecs = res[1]
+
+                        self.update_estimate(id, rvecs, tvecs)
+
+                frame = self.draw_grid_lines(frame, corners, ids)
+                frame = self.draw_robot_pos(frame, corners, ids)
+
+                # show the annotated camera image
+                cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
+                cv2.imshow('RealSense', frame)
+                cv2.waitKey(1)
+
+        finally:
+            # stop streaming; only the RealSense pipeline needs an explicit shutdown
+            if self.pipeline:
+                self.pipeline.stop()
+            else:
+                self.cv_camera.release()
+
+    def update_estimate(self, id, rvec, tvec):
+        # update the marker estimate with new data
+        if id in self.corner_marker_ids.values():
+            # for corner markers we compute the mean of all measurements s.t. the position stabilizes over time
+            # (we assume the markers do not move)
+            corner = next(filter(lambda key: self.corner_marker_ids[key] == id,
+                                 self.corner_marker_ids.keys()))  # get the corner corresponding to the detected marker
+            old_estimate = self.corner_estimates[corner][0]
+            n_estimates = self.corner_estimates[corner][1]
+            tvec_proj = tvec[0][0][0:2]  # projection to get rid of the z coordinate
+            tvec_proj = np.array((tvec_proj[0], -tvec_proj[1]))  # flip y coordinate
+            if old_estimate is not None:
+                new_estimate = (n_estimates * old_estimate + tvec_proj) / (n_estimates + 1)  # running mean update
+            else:
+                new_estimate = tvec_proj  # first estimate
+            self.corner_estimates[corner] = (new_estimate, n_estimates + 1)
+        elif id in self.robot_marker_ids:
+            # for robot markers we extract the x and y position as well as the angle
+            # here we could also implement a filter
+            x = tvec[0][0][0]
+            y = -tvec[0][0][1]  # flip y coordinate
+
+            # compute the orientation angle from the rotation vector
+            rot_mat, _ = cv2.Rodrigues(rvec[0][0])
+            pose_mat = cv2.hconcat((rot_mat, tvec[0][0]))
+            _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat)
+            angle = -euler_angles[2][0] * np.pi / 180.0
+
+            self.angles.append(angle)
+            self.robot_marker_estimates[id[0]] = (x, y, angle)
+
+    def all_corners_detected(self):
+        # checks whether all corner markers have been detected at least once
+        return not any([estimate[0] is None for estimate in self.corner_estimates.values()])
+
+    def get_pos_from_grid_point(self, x, y):
+        # returns the world position for the given grid point based on the current corner estimates
+        assert 0 <= x < self.grid_columns
+        assert 0 <= y < self.grid_rows
+        assert self.all_corners_detected()
+
+        a = self.corner_estimates['a'][0]
+        b = self.corner_estimates['b'][0]
+        c = self.corner_estimates['c'][0]
+        d = self.corner_estimates['d'][0]
+
+        # compute the column line through the center of the grid cell
+        vab = b - a
+        vdc = c - d
+        column_line_top = a + (x + 0.5) / self.grid_columns * vab
+        column_line_bottom = d + (x + 0.5) / self.grid_columns * vdc
+
+        # compute the row line through the center of the grid cell
+        vad = d - a
+        vbc = c - b
+        row_line_top = a + (y + 0.5) / self.grid_rows * vad
+        row_line_bottom = b + (y + 0.5) / self.grid_rows * vbc
+
+        # the grid point is the intersection of the column and row lines
+        column_line = LineString([column_line_top, column_line_bottom])
+        row_line = LineString([row_line_top, row_line_bottom])
+        int_pt = column_line.intersection(row_line)
+        point_of_intersection = np.array([int_pt.x, int_pt.y])
+
+        return point_of_intersection
+
+    def get_grid_point_from_pos(self, pos):
+        # TODO return the nearest grid point for the given position estimate
+        pass
+
+    def print_corner_estimates(self):
+        for key, value in self.corner_estimates.items():
+            if value[0] is not None:
+                print("corner marker {} at pos {}".format(key, value[0]))
+        print()
+
+    def draw_corner_line(self, frame, corner_1, corner_2, corner_coords_dict):
+        # draws a line between the given corner markers onto the given frame
+        if corner_1 in corner_coords_dict and corner_2 in corner_coords_dict:
+            frame = cv2.line(frame, corner_coords_dict[corner_1], corner_coords_dict[corner_2], color=(0, 0, 255),
+                             thickness=2)
+        return frame
+
+    def draw_grid_lines(self, frame, corners, ids):
+        # draws the grid onto the given frame
+        board_corners_pixel_coords = {}
+        for corner, id in self.corner_marker_ids.items():
+            try:
+                ind, _ = np.where(ids == id)  # find index of the marker
+                ind = ind[0]
+                # cv2 drawing functions expect integer pixel coordinates
+                board_corners_pixel_coords[corner] = tuple(np.mean(corners[ind][0], axis=0).astype(int))
+            except IndexError:
+                pass
+
+        frame = self.draw_corner_line(frame, 'a', 'b', board_corners_pixel_coords)
+        frame = self.draw_corner_line(frame, 'b', 'c', board_corners_pixel_coords)
+        frame = self.draw_corner_line(frame, 'c', 'd', board_corners_pixel_coords)
+        frame = self.draw_corner_line(frame, 'd', 'a', board_corners_pixel_coords)
+
+        if set(board_corners_pixel_coords.keys()) == set(self.corner_marker_ids.keys()):  # all corner markers detected
+            a = np.array(board_corners_pixel_coords['a'])
+            b = np.array(board_corners_pixel_coords['b'])
+            c = np.array(board_corners_pixel_coords['c'])
+            d = np.array(board_corners_pixel_coords['d'])
+
+            # draw the column lines
+            vab = b - a
+            vdc = c - d
+            for x in range(1, self.grid_columns):
+                column_line_top = a + x / self.grid_columns * vab
+                column_line_bottom = d + x / self.grid_columns * vdc
+                frame = cv2.line(frame, tuple(column_line_top.astype(int)), tuple(column_line_bottom.astype(int)),
+                                 color=(255, 0, 0), thickness=1)
+
+            # draw the row lines
+            vad = d - a
+            vbc = c - b
+            for y in range(1, self.grid_rows):
+                row_line_top = a + y / self.grid_rows * vad
+                row_line_bottom = b + y / self.grid_rows * vbc
+                frame = cv2.line(frame, tuple(row_line_top.astype(int)), tuple(row_line_bottom.astype(int)),
+                                 color=(255, 0, 0), thickness=1)
+
+        return frame
+
+    def get_robot_state_estimate(self, id):
+        if id in self.robot_marker_estimates:
+            if self.robot_marker_estimates[id] is not None:
+                return self.robot_marker_estimates[id]
+            else:
+                print(f"error: no estimate available for robot {id}")
+        else:
+            print(f"error: invalid robot id {id}")
+        return None
+
+    def draw_robot_pos(self, frame, corners, ids):
+        # draws information about the robot positions onto the given frame
+        robot_corners_pixel_coords = {}
+        for id in self.robot_marker_estimates:
+            try:
+                ind, _ = np.where(ids == id)  # find index of the marker
+                ind = ind[0]
+                robot_corners_pixel_coords[id] = tuple(np.mean(corners[ind][0], axis=0).astype(int))
+            except IndexError:
+                pass
+
+        for id, coord in robot_corners_pixel_coords.items():
+            x, y, angle = self.robot_marker_estimates[id]
+            frame = cv2.putText(frame, "pos = ({:5.3f}, {:5.3f}), ang = {:5.3f}".format(x, y, angle), coord,
+                                cv2.FONT_HERSHEY_SIMPLEX, 0.50, (0, 255, 0))
+        return frame
+
+
+if __name__ == "__main__":
+    estimator = ArucoEstimator()
+    estimator.run_tracking()
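Note: the grid-to-world mapping introduced above can be exercised without a camera. The sketch below is not part of the patch; it seeds corner_estimates by hand with made-up coordinates for a 2.5 m x 2.0 m board and queries two grid cells. Since the mapping targets cell centers, cell (0, 0) maps to roughly (0.125, 0.125).

    # Illustrative sketch (not part of the patch): exercise get_pos_from_grid_point()
    # with hand-seeded corner estimates instead of live marker detections.
    import numpy as np

    from opencv_viewer_example import ArucoEstimator

    estimator = ArucoEstimator.__new__(ArucoEstimator)  # skip __init__, no camera needed
    # pretend each corner marker was observed once (made-up coordinates)
    estimator.corner_estimates = {
        'a': (np.array([0.0, 0.0]), 1),
        'b': (np.array([2.5, 0.0]), 1),
        'c': (np.array([2.5, 2.0]), 1),
        'd': (np.array([0.0, 2.0]), 1),
    }
    print(estimator.get_pos_from_grid_point(0, 0))  # approx. [0.125, 0.125]
    print(estimator.get_pos_from_grid_point(9, 7))  # approx. [2.375, 1.875]
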
diff --git a/remote_control/roborally.py b/remote_control/roborally.py
index 9faa7bf..f160513 100644
--- a/remote_control/roborally.py
+++ b/remote_control/roborally.py
@@ -26,34 +26,36 @@
 import time
 
 from casadi_opt import OpenLoopSolver
 
-from marker_pos_angle.msg import id_pos_angle
+#from marker_pos_angle.msg import id_pos_angle
 
 from collections import OrderedDict
 
 from argparse import ArgumentParser
 
+import opencv_viewer_example
+
 MSGLEN = 64
 
 def myreceive(sock):
     chunks = []
     bytes_recd = 0
     while bytes_recd < MSGLEN:
-        chunk = sock.recv(min(MSGLEN - bytes_recd, 2048))
+        chunk = sock.recv(1)  # read one byte at a time so we can stop exactly at the newline terminator
         if chunk == b'':
             raise RuntimeError("socket connection broken")
         chunks.append(chunk)
         bytes_recd = bytes_recd + len(chunk)
 
-        if chunk[-1] == '\n':
+        if chunks[-1] == b'\n':
             break
 
     return b''.join(chunks)
 
 class Robot:
-    def __init__(self, id, ip):
+    def __init__(self, id, ip, grid_pos=(0, 0, 0)):
         self.pos = None
         self.orient = None
 
-        self.grid_pos = (0,0,0)
+        self.grid_pos = grid_pos
 
         self.id = id
@@ -111,8 +113,13 @@ class RemoteController:
 
         self.anim_stopped = False
 
-        self.robots = [Robot(11, '192.168.1.11'), Robot(14, '192.168.1.14')]
-        #self.robots = [Robot(11, '192.168.1.101')]
+        # robots to control; several robots with initial grid positions can be listed here, e.g.
+        #self.robots = [Robot(11, '192.168.1.11', (6, -3, np.pi)), Robot(14, '192.168.1.14', (6, -2, np.pi))]
+        self.robots = [Robot(12, '192.168.1.12')]
 
         #self.robots = [Robot(15, '192.168.1.102')]
         #self.robots = [Robot(id, ip)]
@@ -148,42 +155,14 @@ class RemoteController:
 
         self.mutex = threading.Lock()
 
-        # ROS subscriber for detected markers
-        marker_sub = rospy.Subscriber("/marker_id_pos_angle", id_pos_angle, self.measurement_callback)
+        # camera-based marker position estimator (replaces the ROS subscriber); runs in a background thread
+        self.estimator = opencv_viewer_example.ArucoEstimator()
+        self.estimator_thread = threading.Thread(target=self.estimator.run_tracking)
+        self.estimator_thread.start()
 
         # pid parameters
         self.controlling = False
 
-        # animation
-        self.fig = plt.figure()
-        self.ax = self.fig.add_subplot(2,2,1)
-        self.ax2 = self.fig.add_subplot(2, 2, 2)
-        self.ax3 = self.fig.add_subplot(2, 2, 4)
-
-        self.xdata, self.ydata = [], []
-        self.line, = self.ax.plot([],[], color='grey', linestyle=':')
-        self.line_sim, = self.ax.plot([], [])
-        self.line_ol, = self.ax.plot([],[], color='green', linestyle='--')
-        self.dirm, = self.ax.plot([], [])
-        self.dirs, = self.ax.plot([], [])
-
-        self.line_x, = self.ax2.plot([],[])
-        self.line_y, = self.ax3.plot([], [])
-
-        self.track_line_inner, = self.ax.plot([], [])
-        self.track_line_outer, = self.ax.plot([], [])
-
-        self.ax.set_xlabel('x-position')
-        self.ax.set_ylabel('y-position')
-        self.ax.grid()
-
-        self.ax2.set_xlabel('Zeit t')
-        self.ax2.set_ylabel('x-position')
-        self.ax2.grid()
-
-        self.ax3.set_xlabel('Zeit t')
-        self.ax3.set_ylabel('y-position')
-        self.ax3.grid()
-
         self.mstep = 2
         self.ols = OpenLoopSolver(N=20, T=1.0)
         self.ols.setup()
@@ -194,96 +173,10 @@ class RemoteController:
 
         # integrator
         self.r = scipy.integrate.ode(f_ode)
 
         self.omega_max = 5.0
-        self.control_scaling = 0.3
+        self.control_scaling = 0.1
         #self.omega_max = 13.32
 
-    def ani(self):
-
-        print("starting animation")
-        self.ani = anim.FuncAnimation(self.fig, init_func=self.ani_init, func=self.ani_update, interval=10, blit=True)
-        plt.ion()
-        plt.show(block=True)
-
-    def ani_init(self):
-        self.ax.set_xlim(-2, 2)
-        self.ax.set_ylim(-2, 2)
-        self.ax.set_aspect('equal', adjustable='box')
-
-        self.ax2.set_ylim(-2, 2)
-        self.ax2.set_xlim(0, 10)
-
-        self.ax3.set_ylim(-2, 2)
-        self.ax3.set_xlim(0, 10)
-
-        self.track_line_inner.set_data(self.track.inner_poly.exterior.xy)
-        self.track_line_outer.set_data(self.track.outer_poly.exterior.xy)
-
-        return self.line, self.line_sim, self.dirm, self.dirs, self.line_ol,\
-               self.track_line_inner, self.track_line_outer, self.line_x,self.line_y,
-
-    def ani_update(self, frame):
-        if self.anim_stopped:
-            self.ani.event_source.stop()
-            sys.exit(0)
-
-        #print("plotting")
-        self.mutex.acquire()
-        try:
-            # copy data for plot from global arrays
-            if self.tms is not None:
-                tm_local = deepcopy(self.tms)
-                xm_local = deepcopy(self.xms)
-
-                if len(tm_local) > 0:
-                    # plot path of the robot
-                    self.line.set_data(xm_local[:,0], xm_local[:,1])
-
-                    # compute and plot direction the robot is facing
-                    a = xm_local[-1, 0]
-                    b = xm_local[-1, 1]
-
-                    a2 = a + np.cos(xm_local[-1, 2]) * 0.2
-                    b2 = b + np.sin(xm_local[-1, 2]) * 0.2
-
-                    self.dirm.set_data(np.array([a, a2]), np.array([b, b2]))
-
-                    n_plot = 300
-                    if len(tm_local) > n_plot:
-                        # plot x and y coordinate
-                        self.line_x.set_data(tm_local[-n_plot:] - (tm_local[-1] - 10), xm_local[-n_plot:,0])
-                        self.line_y.set_data(tm_local[-n_plot:] - (tm_local[-1] - 10), xm_local[-n_plot:, 1])
-
-            ts_local = deepcopy(self.ts)
-            xs_local = deepcopy(self.xs)
-
-            if len(ts_local) > 0:
-                # plot simulated path of the robot
-                self.line_sim.set_data(xs_local[:,0], xs_local[:,1])
-
-                # compute and plot direction the robot is facing
-                a = xs_local[-1, 0]
-                b = xs_local[-1, 1]
-
-                a2 = a + np.cos(xs_local[-1, 2]) * 0.2
-                b2 = b + np.sin(xs_local[-1, 2]) * 0.2
-
-                self.dirs.set_data(np.array([a, a2]), np.array([b, b2]))
-
-            ol_x_local = deepcopy(self.ol_x)
-            ol_y_local = deepcopy(self.ol_y)
-
-            if ol_x_local is not None:
-                self.line_ol.set_data(ol_x_local, ol_y_local)
-            else:
-                self.line_ol.set_data([],[])
-
-        finally:
-            self.mutex.release()
-
-        return self.line, self.line_sim, self.dirm, self.dirs, self.line_ol, self.track_line_inner, self.track_line_outer,\
-               self.line_x, self.line_y,
-
     def measurement_callback(self, data):
         #print(data)
 
@@ -330,7 +223,7 @@ class RemoteController:
                     inputs = data.split(b',')
                     cmd = inputs[0]
-                    cmd = cmd.strip()
+                    cmd = cmd.strip().decode()  # decode so the command can be compared with the strings in valid_cmds
 
                     if len(inputs) > 1:
                         if cmd in self.valid_cmds:
@@ -341,13 +234,14 @@
                                 clientsocket.sendall(b'Could not read robot id!\n')
 
                             if robot_id in self.robot_ids:
                                 if cmd == 'get position':
-                                    clientsocket.sendall(bytes('{}\n'.format(self.robot_ids[robot_id].grid_pos)))
+                                    clientsocket.sendall('{}\n'.format(self.robot_ids[robot_id].grid_pos).encode())
                                 elif cmd == 'set position':
                                     try:
-                                        pos_data = ",".join(inputs[2:])
+                                        pos_data = b",".join(inputs[2:]).decode()  # inputs are bytes -> decode before parsing
                                         new_grid_pos = tuple(map(lambda x: int(x[1]) if x[0] < 2 else float(x[1]),
                                                                  enumerate(pos_data.strip().strip('()').split(','))))
                                         self.robot_ids[robot_id].grid_pos = new_grid_pos
+                                        clientsocket.sendall(b'OK\n')
                                     except IndexError as e:
                                         print("could not set grid position!")
                                         clientsocket.sendall(bytes(
@@ -366,7 +260,7 @@
                     else:
                         clientsocket.sendall(b'Invalid command!\n')
                 else:  # len(inputs) <= 1
-                    if 'quit' in inputs[0]:
+                    if b'quit' in inputs[0]:
                         clientsocket.close()
                         self.comm_socket.close()
                         connected = False
@@ -375,8 +269,6 @@
                         print("could not process command!")
                         clientsocket.sendall(b'Could not process request!\n')
 
-
-
             except RuntimeError:
                 print("disconnected")
                 connected = False
@@ -418,7 +310,9 @@ class RemoteController:
                 grid_pos = (new_x, new_y, new_angle)
                 print("new grid pos for robot {}: {}\n".format(robot_id, grid_pos))
 
-                self.target = np.array((0.25 * grid_pos[0], 0.25 * grid_pos[1], grid_pos[2]))
+                # convert the grid cell to a world position using the camera-based estimator
+                target_pos = self.estimator.get_pos_from_grid_point(new_x, new_y)
+                self.target = np.array((target_pos[0], target_pos[1], grid_pos[2]))
 
                 self.pid = False
                 self.mpc = True
@@ -464,7 +358,8 @@ class RemoteController:
                     return
 
                 if self.mpc:
-                    x_pred = self.get_measurement_prediction(robot_id)
+                    #x_pred = self.get_measurement_prediction(robot_id)
+                    x_pred = self.get_measurement()
 
                     tmpc_start = time.time()
 
@@ -474,7 +369,8 @@ class RemoteController:
                     #print("error pos = ", error_pos)
                     #print("error_pos = {}, error_ang = {}".format(error_pos, error_ang))
 
-                    if error_pos > 0.075 or error_ang > 0.35:
+                    #if error_pos > 0.075 or error_ang > 0.35:
+                    if error_pos > 0.05 or error_ang > 0.2:
                         # solve mpc open loop problem
                         res = self.ols.solve(x_pred, self.target)
 
@@ -510,7 +406,7 @@ class RemoteController:
                         u1 = us1[i]
                         u2 = us2[i]
                         if self.robot_ids[robot_id].socket:
-                            self.robot_ids[robot_id].socket.send('({},{})\n'.format(u1, u2))
+                            self.robot_ids[robot_id].socket.send('({},{})\n'.format(u1, u2).encode())
                         if i < self.mstep:
                             time.sleep(self.dt)
                     self.t = time.time()  # save time the most recent control was applied
@@ -538,7 +434,7 @@ class RemoteController:
 
         return x_pred
 
-    def get_measurement(self):
+    def get_measurement_old(self):
         self.mutex.acquire()
         try:
             last_measurement = copy.deepcopy(self.xms[-1:])
@@ -546,6 +442,9 @@ class RemoteController:
             self.mutex.release()
         return last_measurement[0]
 
+    def get_measurement(self):
+        # TODO the robot marker id is hard-coded for now; it must match ArucoEstimator.robot_marker_ids
+        return np.array(self.estimator.get_robot_state_estimate(12))
+
     def pos_getter(self):
         while True:
             x_pred = self.get_measurement_prediction()
@@ -562,7 +461,7 @@ def main(args):
 
     ip = args.ip
 
-    rospy.init_node('controller_node', anonymous=True)
+    #rospy.init_node('controller_node', anonymous=True)
 
     rc = RemoteController(marker_id, ip)
 
@@ -576,6 +475,10 @@ def main(args):
 
     #    while not rc.track.track_complete:
     #        pass
 
+    # give the estimator thread some time to detect the markers before starting the controller
+    time.sleep(1)
+
     #threading.Thread(target=rc.input_handling).start()
 
     controller_thread = threading.Thread(target=rc.controller)
     controller_thread.start()
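
Note: the command handling changed above speaks a simple comma-separated, newline-terminated protocol (at most MSGLEN = 64 bytes per message) over the controller's comm socket. The sketch below is not part of the patch and shows a minimal client; HOST and PORT are placeholders, since the socket's listening address is configured outside this excerpt.

    # Illustrative client sketch (not part of the patch) for the command protocol.
    # HOST and PORT are assumptions; adjust them to the actual comm socket address.
    import socket

    HOST = "192.168.1.2"  # hypothetical address of the machine running roborally.py
    PORT = 42424          # hypothetical port of the controller's comm socket


    def send_command(sock, cmd):
        # commands are comma-separated and terminated by a newline
        sock.sendall(cmd.encode() + b"\n")
        reply = b""
        while not reply.endswith(b"\n"):  # replies are newline-terminated as well
            chunk = sock.recv(1)
            if chunk == b"":
                break
            reply += chunk
        return reply


    with socket.create_connection((HOST, PORT)) as sock:
        # query the grid position of robot 12, then move it to grid cell (3, 4) facing ~pi/2
        print(send_command(sock, "get position,12"))
        print(send_command(sock, "set position,12,(3,4,1.57)"))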