diff --git a/remote_control/opencv_viewer_example.py b/remote_control/opencv_viewer_example.py
index cf310da..6c503a8 100644
--- a/remote_control/opencv_viewer_example.py
+++ b/remote_control/opencv_viewer_example.py
@@ -1,21 +1,13 @@
-## License: Apache 2.0. See LICENSE file in root directory.
-## Copyright(c) 2015-2017 Intel Corporation. All Rights Reserved.
-
-###############################################
-##      Open CV and Numpy integration        ##
-###############################################
-
 import pyrealsense2 as rs
 import numpy as np
 import cv2
 from cv2 import aruco
 from shapely.geometry import LineString
 
-import time
-
 DS5_product_ids = ["0AD1", "0AD2", "0AD3", "0AD4", "0AD5", "0AF6", "0AFE", "0AFF", "0B00", "0B01", "0B03", "0B07","0B3A"]
 
-def find_device_that_supports_advanced_mode() :
+
+def find_device_that_supports_advanced_mode():
     ctx = rs.context()
     ds5_dev = rs.device()
     devices = ctx.query_devices()
@@ -26,6 +18,7 @@ def find_device_that_supports_advanced_mode() :
             return dev
     raise Exception("No device that supports advanced mode was found")
 
+
 class ArucoEstimator:
     grid_columns = 10
     grid_rows = 8
@@ -36,9 +29,6 @@ class ArucoEstimator:
         'd': 3
     }
 
-    robot_marker_ids = [12]
-
-    robot_marker_estimates = dict([(id, None) for id in robot_marker_ids])
     angles = []
 
    corner_estimates = {
@@ -48,8 +38,11 @@ class ArucoEstimator:
         'd': (None, 0)
     }
 
-    def __init__(self):
-        if True: # check if realsense camera is connected
+    def __init__(self, robot_marker_ids=[]):
+        self.robot_marker_ids = robot_marker_ids
+        self.robot_marker_estimates = dict([(id, None) for id in self.robot_marker_ids])
+
+        if False: # check if realsense camera is connected
             # Configure depth and color streams
             self.pipeline = rs.pipeline()
             config = rs.config()
@@ -75,9 +68,6 @@ class ArucoEstimator:
             self.cv_camera = cv2.VideoCapture(0)
             self.pipeline = None
 
-        # array containing pose estimates for each marker
-        estimates = {}
-
     def run_tracking(self):
         try:
             while True:
@@ -100,16 +90,27 @@ class ArucoEstimator:
                 corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
                 frame = aruco.drawDetectedMarkers(color_image.copy(), corners, ids)
 
-                if ids is not None:
+                if False:  # ids is not None:
                     for id, c in zip(ids, corners):
                         res = aruco.estimatePoseSingleMarkers(c, 0.10, self.camera_matrix, self.dist_coeffs)
-                        rvecs = res[0]
-                        tvecs = res[1]
+                        rvecs = res[0][0][0]
+                        tvecs = res[1][0][0]
 
                         self.update_estimate(id, rvecs, tvecs)
 
                     frame = self.draw_grid_lines(frame, corners, ids)
                     frame = self.draw_robot_pos(frame, corners, ids)
+                else:
+                    # pretend we detected some markers
+                    pass
+                    self.update_estimate(0, rvec=np.array([0, 0, 0]), tvec=np.array([-1, 1, 0]))
+                    self.update_estimate(1, rvec=np.array([0, 0, 0]), tvec=np.array([1, 1.5, 0]))
+                    self.update_estimate(2, rvec=np.array([0, 0, 0]), tvec=np.array([1, -1.5, 0]))
+                    self.update_estimate(3, rvec=np.array([0, 0, 0]), tvec=np.array([-1, -1, 0]))
+                    #import random
+                    #self.update_estimate(12, rvec=np.array([0.0, 0.0, 0.0]), tvec=np.array([-1.0 + random.random() * 2, -1.0 + random.random() * 2, 0]))
+                    self.update_estimate(12, rvec=np.array([0.0, 0.0, 0.0]),
+                                         tvec=np.array([1.2, 0.42, 0]))
 
                 # Show images
                 cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
@@ -117,15 +118,9 @@ class ArucoEstimator:
                 cv2.waitKey(1)
 
         finally:
-            # Stop streaming
-            self.pipeline.stop()
-
-
-            # import matplotlib.pyplot as plt
-
-            # plt.plot(playboard.angles)
-            # plt.show()
-
+            if self.pipeline is not None:
+                # Stop streaming
+                self.pipeline.stop()
 
     def update_estimate(self, id, rvec, tvec):
         # update the marker estimate with new data
@@ -135,7 +130,7 @@ class ArucoEstimator:
             corner = next(filter(lambda key: self.corner_marker_ids[key] == id, self.corner_marker_ids.keys())) # get corresponding corner to the detected marker
             old_estimate = self.corner_estimates[corner][0]
             n_estimates = self.corner_estimates[corner][1]
-            tvec_proj = tvec[0][0][0:2] # projection to get rid of z coordinate
+            tvec_proj = tvec[0:2] # projection to get rid of z coordinate
             tvec_proj = np.array((tvec_proj[0], -tvec_proj[1])) # flip y coordinate
             if old_estimate is not None:
                 new_estimate = (n_estimates * old_estimate + tvec_proj) / (n_estimates + 1) # weighted update
@@ -145,17 +140,17 @@ class ArucoEstimator:
         elif id in self.robot_marker_ids:
             # for robot markers we extract x and y position as well as the angle
             # here we could also implement a filter
-            x = tvec[0][0][0]
-            y = -tvec[0][0][1] # flip y coordinate
+            x = tvec[0]
+            y = -tvec[1] # flip y coordinate
 
             # compute angle
-            rot_mat, _ = cv2.Rodrigues(rvec[0][0])
-            pose_mat = cv2.hconcat((rot_mat, tvec[0][0]))
+            rot_mat, _ = cv2.Rodrigues(rvec)
+            pose_mat = cv2.hconcat((rot_mat, tvec))
             _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat)
             angle = -euler_angles[2][0] * np.pi / 180.0
 
             self.angles.append(angle)
-            self.robot_marker_estimates[id[0]] = (x, y, angle)
+            self.robot_marker_estimates[id] = (x, y, angle)
 
     def all_corners_detected(self):
         # checks if all corner markers have been detected at least once
@@ -308,4 +303,4 @@ class ArucoEstimator:
 
 
 if __name__ == "__main__":
-    playboard = Board()
\ No newline at end of file
+    estimator = ArucoEstimator()
diff --git a/remote_control/roborally.py b/remote_control/roborally.py
index f160513..b200232 100644
--- a/remote_control/roborally.py
+++ b/remote_control/roborally.py
@@ -52,10 +52,21 @@ def myreceive(sock):
 
 
 class Robot:
-    def __init__(self, id, ip, grid_pos = (0,0,0)):
-        self.pos = None
-        self.orient = None
-        self.grid_pos = grid_pos
+    # dictionary mapping the current orientation and a turn command to the resulting orientation
+    resulting_orientation = {
+        '^': {'turn left': '<', 'turn right': '>', 'turn around': 'v'},
+        '>': {'turn left': '^', 'turn right': 'v', 'turn around': '<'},
+        'v': {'turn left': '>', 'turn right': '<', 'turn around': '^'},
+        '<': {'turn left': 'v', 'turn right': '^', 'turn around': '>'},
+    }
+
+    # dictionary mapping an orientation to its opposite
+    opposites = {'^': 'v', '>': '<', 'v': '^', '<': '>'}
+
+    def __init__(self, id, ip, x=0, y=0, orientation='>'):
+        self.x = x
+        self.y = y
+        self.orientation = orientation
 
         self.id = id
 
@@ -75,17 +86,52 @@ class Robot:
         self.u1 = 0.0
         self.u2 = 0.0
 
+    def get_neighbor_coordinates(self, direction):
+        # get the coordinates of the neighboring tile in the given direction
+        if direction == '^':
+            return (self.x, self.y - 1)
+        elif direction == '>':
+            return (self.x + 1, self.y)
+        elif direction == 'v':
+            return (self.x, self.y + 1)
+        elif direction == '<':
+            return (self.x - 1, self.y)
+        else:
+            print("error: unknown direction")
+            sys.exit(1)
+
+    def move(self, type):
+        if type == 'forward':
+            target_tile = self.get_neighbor_coordinates(self.orientation)
+            self.x = target_tile[0]
+            self.y = target_tile[1]
+        elif type == 'backward':
+            opposite_orientation = Robot.opposites[self.orientation]
+            target_tile = self.get_neighbor_coordinates(opposite_orientation)
+            self.x = target_tile[0]
+            self.y = target_tile[1]
+        elif 'turn' in type:
+            self.orientation = Robot.resulting_orientation[self.orientation][type]
+        else:
+            print("error: invalid move")
+            sys.exit(1)
     def connect(self):
         # connect to robot
         try:
             print("connecting to robot {} with ip {} ...".format(self.id, self.ip))
-            self.socket.connect((self.ip, 1234)) # connect to robot
+            #self.socket.connect((self.ip, 1234)) # connect to robot
             print("connected!")
         except socket.error:
             print("could not connect to robot {} with ip {}".format(self.id, self.ip))
             sys.exit(1)
 
+    def __str__(self):
+        return self.__repr__()
+
+    def __repr__(self):
+        return f"x: {self.x}, y: {self.y}, orientation: {self.orientation}"
+
 
 def f_ode(t, x, u):
     # dynamical model of the two-wheeled robot
     # TODO: find exact values for these parameters
@@ -110,9 +156,6 @@ class RemoteController:
 
 class RemoteController:
     def __init__(self, id, ip):
-
-        self.anim_stopped = False
-
         # self.robots = #[Robot(11, '192.168.1.11', (6, -3, np.pi)), Robot(12, '192.168.1.12', (6, -3, np.pi)),
         #                Robot(13, '192.168.1.13', (6, -3, np.pi)), Robot(14, '192.168.1.14', (6, -2, np.pi))]
         #self.robots = [Robot(13, '192.168.1.13', (6, -3, np.pi))]
@@ -120,8 +163,6 @@ class RemoteController:
         #self.robots = [Robot(11, '192.168.1.11', (6,-3,0)), Robot(14, '192.168.1.14', (6,3,0))]
         #self.robots = [Robot(11, '192.168.1.11'), Robot(14, '192.168.1.14')]
         self.robots = [Robot(12, '192.168.1.12')]
-        #self.robots = [Robot(15, '192.168.1.102')]
-        #self.robots = [Robot(id, ip)]
 
         self.robot_ids = {}
         for r in self.robots:
@@ -136,7 +177,6 @@ class RemoteController:
         for robot in self.robots:
             robot.connect()
 
-        #self.comm_socket.bind((socket.gethostname(), 1337))
         self.comm_socket.bind(('', 1337))
         self.comm_socket.listen(5)
 
@@ -147,15 +187,12 @@ class RemoteController:
         self.ts = np.array([])
         self.xs = []
 
-        # variable for mpc open loop
         self.ol_x = None
         self.ol_y = None
 
-        self.mutex = threading.Lock()
 
-        # ROS subscriber for detected markers
-        self.estimator = opencv_viewer_example.ArucoEstimator()
+        self.estimator = opencv_viewer_example.ArucoEstimator(self.robot_ids.keys())
         self.estimator_thread = threading.Thread(target=self.estimator.run_tracking)
         self.estimator_thread.start()
 
@@ -173,41 +210,13 @@ class RemoteController:
 
         # integrator
         self.r = scipy.integrate.ode(f_ode)
         self.omega_max = 5.0
-        self.control_scaling = 0.1
+        self.control_scaling = 0.2
         #self.omega_max = 13.32
-
-    def measurement_callback(self, data):
-        #print(data)
-
-        # detect robots
-        if data.id in self.robot_ids:
-            r = self.robot_ids[data.id]
-
-            r.pos = (data.x, data.y) # only x and y component are important for us
-            r.euler = data.angle
-
-            # save measured position and angle for plotting
-            measurement = np.array([r.pos[0], r.pos[1], r.euler])
-            if r.tms_0 is None:
-                r.tms_0 = time.time()
-                r.xm_0 = measurement
-
-                self.mutex.acquire()
-                try:
-                    r.tms = np.array([0.0])
-                    r.xms = measurement
-                finally:
-                    self.mutex.release()
-            else:
-                self.mutex.acquire()
-                try:
-                    r.tms = np.vstack((r.tms, time.time() - r.tms_0))
-                    r.xms = np.vstack((r.xms, measurement))
-                finally:
-                    self.mutex.release()
-
     def controller(self):
+        print("waiting until all markers are detected...")
+        while not self.estimator.all_corners_detected():
+            pass
         print("starting control")
 
         running = True
@@ -275,181 +284,103 @@ class RemoteController:
             clientsocket.close()
 
     def mpc_control(self, robot_id, cmd):
-        robot = self.robot_ids[robot_id] # get robot to be controlled
-        grid_pos = robot.grid_pos # grid position of the robot
+        robot = self.robot_ids[robot_id] # get robot to be controlled
 
-        print("old grid pos for robot {}: {}".format(robot_id, grid_pos))
+        print("robot grid pos before move: ", robot)
+        robot.move(cmd)
+        print("robot grid pos after move: ", robot)
 
-        # compute new grid position and orientation
-        if cmd == 'forward':
-            new_x = int(round(grid_pos[0] + 1 * np.cos(grid_pos[2])))
-            new_y = int(round(grid_pos[1] + 1 * np.sin(grid_pos[2])))
-            new_angle = grid_pos[2]
-        elif cmd == 'backward':
-            new_x = int(round(grid_pos[0] - 1 * np.cos(grid_pos[2])))
-            new_y = int(round(grid_pos[1] - 1 * np.sin(grid_pos[2])))
-            new_angle = grid_pos[2]
-        elif cmd == 'turn left':
-            new_x = grid_pos[0]
-            new_y = grid_pos[1]
-            new_angle = np.unwrap([0, grid_pos[2] + np.pi / 2])[1]
-        elif cmd == 'turn right':
-            new_x = grid_pos[0]
-            new_y = grid_pos[1]
-            new_angle = np.unwrap([0, grid_pos[2] - np.pi / 2])[1]
-        elif cmd == 'turn around':
-            new_x = grid_pos[0]
-            new_y = grid_pos[1]
-            new_angle = np.unwrap([0, grid_pos[2] + np.pi])[1]
-        else:
-            print("unknown command!")
-            sys.exit(1)
-        if new_x != grid_pos[0] and new_y != grid_pos[1]:
-            print("problem detected!")
+        self.target = self.estimator.get_pos_from_grid_point(robot.x, robot.y, robot.orientation)
 
-        grid_pos = (new_x, new_y, new_angle)
-        print("new grid pos for robot {}: {}\n".format(robot_id, grid_pos))
+        self.pid = False
+        self.mpc = True
 
-        target_pos = self.estimator.get_pos_from_grid_point(new_x, new_y)
-        self.target = np.array((target_pos[0], target_pos[1], grid_pos[2]))
-        #np.array((0.25 * grid_pos[0], 0.25 * grid_pos[1], grid_pos[2]))
+        near_target = 0
 
-        self.pid = False
-        self.mpc = True
+        while near_target < 5:
+            # open loop controller
+            events = pygame.event.get()
 
-        near_target = 0
-
-        while near_target < 5:
-            # open loop controller
-            events = pygame.event.get()
-
-            for event in events:
-                if event.type == pygame.KEYDOWN:
-                    if event.key == pygame.K_UP:
-                        self.controlling = True
-                        self.t = time.time()
-                    elif event.key == pygame.K_DOWN:
-                        self.controlling = False
-                        if self.robot_ids[robot_id].socket:
-                            self.robot_ids[robot_id].socket.send('(0.0,0.0)\n')
-                    elif event.key == pygame.K_0:
-                        self.target = np.array([0,0,0])
-                    elif event.key == pygame.K_PLUS:
-                        self.control_scaling += 0.1
-                        self.control_scaling = min(self.control_scaling, 1.0)
-                        print("control scaling = ", self.control_scaling)
-                    elif event.key == pygame.K_MINUS:
-                        self.control_scaling -= 0.1
-                        self.control_scaling = max(self.control_scaling, 0.1)
-                        print("control scaling = ", self.control_scaling)
-                    elif event.key == pygame.K_ESCAPE:
-                        print("quit!")
-                        self.controlling = False
-                        if self.robot_ids[robot_id].socket:
-                            self.robot_ids[robot_id].socket.send('(0.0,0.0)\n')
-                        self.anim_stopped = True
-                        return
-                    elif event.key == pygame.QUIT:
-                        print("quit!")
-                        self.controlling = False
-                        if self.robot_ids[robot_id].socket:
-                            self.robot_ids[robot_id].socket.send('(0.0,0.0)\n')
-                        self.anim_stopped = True
-                        return
-
-            if self.mpc:
-                #x_pred = self.get_measurement_prediction(robot_id)
-                x_pred = self.get_measurement()
-
-                tmpc_start = time.time()
-
-                error_pos = np.linalg.norm(x_pred[0:2] - self.target[0:2])
-                angles_unwrapped = np.unwrap([x_pred[2], self.target[2]]) # unwrap angle to avoid jump in data
-                error_ang = np.abs(angles_unwrapped[0] - angles_unwrapped[1])
-                #print("error pos = ", error_pos)
-                #print("error_pos = {}, error_ang = {}".format(error_pos, error_ang))
-
-                #if error_pos > 0.075 or error_ang > 0.35:
-                if error_pos > 0.05 or error_ang > 0.2:
-                    # solve mpc open loop problem
-                    res = self.ols.solve(x_pred, self.target)
-
-                    #us1 = res[0]
-                    #us2 = res[1]
-                    us1 = res[0] * self.control_scaling
-                    us2 = res[1] * self.control_scaling
-                    #print("u = {}", (us1, us2))
-
-                    # save open loop trajectories for plotting
-                    self.mutex.acquire()
-                    try:
-                        self.ol_x = res[2]
-                        self.ol_y = res[3]
-                    finally:
-                        self.mutex.release()
-
-                    tmpc_end = time.time()
-                    #print("---------------- mpc solution took {} seconds".format(tmpc_end - tmpc_start))
-                    dt_mpc = time.time() - self.t
-                    if dt_mpc < self.dt: # wait until next control can be applied
-                        #print("sleeping for {} seconds...".format(self.dt - dt_mpc))
-                        time.sleep(self.dt - dt_mpc)
-                else:
-                    us1 = [0] * self.mstep
-                    us2 = [0] * self.mstep
-
-                    near_target += 1
-                    robot.grid_pos = grid_pos
-
-                # send controls to the robot
-                for i in range(0, self.mstep): # option to use multistep mpc if len(range) > 1
-                    u1 = us1[i]
-                    u2 = us2[i]
+            for event in events:
+                if event.type == pygame.KEYDOWN:
+                    if event.key == pygame.K_UP:
+                        self.controlling = True
+                        self.t = time.time()
+                    elif event.key == pygame.K_DOWN:
+                        self.controlling = False
                        if self.robot_ids[robot_id].socket:
-                            self.robot_ids[robot_id].socket.send('({},{})\n'.format(u1, u2).encode())
-                    if i < self.mstep:
-                        time.sleep(self.dt)
-                        self.t = time.time() # save time the most recent control was applied
+                            self.robot_ids[robot_id].socket.send('(0.0,0.0)\n')
+                    elif event.key == pygame.K_0:
+                        self.target = np.array([0,0,0])
+                    elif event.key == pygame.K_PLUS:
+                        self.control_scaling += 0.1
+                        self.control_scaling = min(self.control_scaling, 1.0)
+                        print("control scaling = ", self.control_scaling)
+                    elif event.key == pygame.K_MINUS:
+                        self.control_scaling -= 0.1
+                        self.control_scaling = max(self.control_scaling, 0.1)
+                        print("control scaling = ", self.control_scaling)
+                    elif event.key == pygame.K_ESCAPE:
+                        print("quit!")
+                        self.controlling = False
+                        if self.robot_ids[robot_id].socket:
+                            self.robot_ids[robot_id].socket.send('(0.0,0.0)\n')
+                        self.anim_stopped = True
+                        return
+                    elif event.key == pygame.QUIT:
+                        print("quit!")
+                        self.controlling = False
+                        if self.robot_ids[robot_id].socket:
+                            self.robot_ids[robot_id].socket.send('(0.0,0.0)\n')
+                        self.anim_stopped = True
+                        return
+            if self.mpc:
+                x_pred = self.get_measurement(robot_id)
 
-    def get_measurement_prediction(self, robot_id):
-        # get measurement
-        self.mutex.acquire()
-        try:
-            window = 3
-            last_measurement = copy.deepcopy(self.robot_ids[robot_id].xms[-window:])
-            #print("last_measurements = {}".format(last_measurement))
-            #print("mean = {}".format(np.mean(last_measurement, axis=0)))
-            last_measurement = np.mean(last_measurement, axis=0)
-            last_time = copy.deepcopy(self.robot_ids[robot_id].tms[-1])
-        finally:
-            self.mutex.release()
+                tmpc_start = time.time()
 
-        # prediction of state at time the mpc will terminate
-        self.r.set_f_params(np.array([self.robot_ids[robot_id].u1 * self.omega_max, self.robot_ids[robot_id].u2 * self.omega_max]))
+                error_pos = np.linalg.norm(x_pred[0:2] - self.target[0:2])
+                angles_unwrapped = np.unwrap([x_pred[2], self.target[2]]) # unwrap angle to avoid jump in data
+                error_ang = np.abs(angles_unwrapped[0] - angles_unwrapped[1])
+                #print("error pos = ", error_pos)
+                #print("error_pos = {}, error_ang = {}".format(error_pos, error_ang))
 
-        self.r.set_initial_value(last_measurement, last_time)
+                #if error_pos > 0.075 or error_ang > 0.35:
+                if error_pos > 0.05 or error_ang > 0.2:
+                    # solve mpc open loop problem
+                    res = self.ols.solve(x_pred, self.target)
 
-        x_pred = self.r.integrate(self.r.t + self.dt)
+                    #us1 = res[0]
+                    #us2 = res[1]
+                    us1 = res[0] * self.control_scaling
+                    us2 = res[1] * self.control_scaling
+                    #print("u = {}", (us1, us2))
 
-        return x_pred
+                    tmpc_end = time.time()
+                    #print("---------------- mpc solution took {} seconds".format(tmpc_end - tmpc_start))
+                    dt_mpc = time.time() - self.t
+                    if dt_mpc < self.dt: # wait until next control can be applied
+                        #print("sleeping for {} seconds...".format(self.dt - dt_mpc))
+                        time.sleep(self.dt - dt_mpc)
+                else:
+                    us1 = [0] * self.mstep
+                    us2 = [0] * self.mstep
 
-    def get_measurement_old(self):
-        self.mutex.acquire()
-        try:
-            last_measurement = copy.deepcopy(self.xms[-1:])
-        finally:
-            self.mutex.release()
-        return last_measurement[0]
+                    near_target += 1
 
-    def get_measurement(self):
-        return np.array(self.estimator.get_robot_state_estimate(12))
+                # send controls to the robot
+                for i in range(0, self.mstep): # option to use multistep mpc if len(range) > 1
+                    u1 = us1[i]
+                    u2 = us2[i]
+                    if self.robot_ids[robot_id].socket:
+                        pass  # self.robot_ids[robot_id].socket.send('({},{})\n'.format(u1, u2).encode())
+                    if i < self.mstep:
+                        time.sleep(self.dt)
+                        self.t = time.time() # save time the most recent control was applied
 
-    def pos_getter(self):
-        while True:
-            x_pred = self.get_measurement_prediction()
-
-            print("pos = ", x_pred)
+    def get_measurement(self, robot_id):
+        return np.array(self.estimator.get_robot_state_estimate(robot_id))
 
 def main(args):
     parser = ArgumentParser()
@@ -460,9 +391,6 @@ def main(args):
     marker_id = int(args.id)
     ip = args.ip
 
-
-    #rospy.init_node('controller_node', anonymous=True)
-
     rc = RemoteController(marker_id, ip)
 
     pygame.init()
@@ -471,21 +399,7 @@ def main(args):
     screenwidth = 640
     pygame.display.set_mode([screenwidth, screenheight])
 
-    # print("waiting until track is completely detected")
-    # while not rc.track.track_complete:
-    #     pass
-
-
-
-    time.sleep(1)
-
-    #threading.Thread(target=rc.input_handling).start()
-    controller_thread = threading.Thread(target=rc.controller)
-    controller_thread.start()
-
-    #time.sleep(10)
-    #rc.ani()
-
+    rc.controller()
 
 if __name__ == '__main__':
     main(sys.argv)