diff --git a/remote_control/opencv_viewer_example.py b/remote_control/opencv_viewer_example.py
index 372fe12..156e852 100644
--- a/remote_control/opencv_viewer_example.py
+++ b/remote_control/opencv_viewer_example.py
@@ -1,13 +1,16 @@
 import pyrealsense2 as rs
 import numpy as np
 import cv2
-from cv2 import aruco
+import os
+
 from shapely.geometry import LineString
 from queue import Queue
 
-class ArucoEstimator:
+import aruco
+
+class ArucoEstimator(socketserver.BaseRequestHandler):
     corner_marker_ids = {
-        'a': 0,
+        'a': 15,
         'b': 1,
         'c': 2,
         'd': 3
@@ -59,6 +62,25 @@ class ArucoEstimator:
             self.cv_camera = cv2.VideoCapture(0)
             self.pipeline = None
 
+        # create detector and get parameters
+        self.detector = aruco.MarkerDetector()
+        #self.detector.setDictionary('ARUCO_MIP_36h12')
+        #self.detector.setDictionary('ARUCO_MIP_16h3')
+        #self.detector.setDictionary('ARUCO')
+        #self.detector.setDetectionMode(aruco.DM_NORMAL, 0.05)
+        #self.detector.setDetectionMode(aruco.DM_VIDEO_FAST, 0.05)
+
+        self.detector_params = self.detector.getParameters()
+
+        # print detector parameters
+        print("detector params:")
+        for val in dir(self.detector_params):
+            if not val.startswith("__"):
+                print("\t{} : {}".format(val, self.detector_params.__getattribute__(val)))
+
+        self.camparam = aruco.CameraParameters()
+        self.camparam.readFromXMLFile(os.path.join(os.path.dirname(__file__), "dfk72_6mm_param2.yml"))
+
     def mouse_callback(self, event, px, py, flags, param):
         if event == 1:
             if self.all_corners_detected():
@@ -107,38 +129,39 @@ class ArucoEstimator:
 
                 gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
 
-                #aruco_dict = aruco.Dictionary_get(aruco.DICT_APRILTAG_16H5)
-                #aruco_dict = aruco.Dictionary_get(aruco.DICT_7X7_50)
-                #aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
-                aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_1000)
-                #aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)  # fast
-                parameters = aruco.DetectorParameters_create()
-                corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
-                frame = aruco.drawDetectedMarkers(color_image.copy(), corners, ids)
+                # run aruco marker detection
+                detected_markers = self.detector.detect(gray)
 
-                if ids is not None:
-                    for id, corner in zip(ids, corners):
-                        if id in self.corner_marker_ids.values():
-                            marker_size = 0.1
+                # extract data for detected markers
+                detected_marker_data = {}
+                for marker in detected_markers:
+                    detected_marker_data[marker.id] = {'center': marker.getCenter()}
+                    if marker.id > 0:
+                        if marker.id in self.corner_marker_ids.values():
+                            marker.calculateExtrinsics(0.1, self.camparam)
                         else:
-                            marker_size = 0.07
-                        corner_pixel_coord = np.mean(corner[0], axis=0)
-                        # res = aruco.estimatePoseSingleMarkers(corner, marker_size, self.camera_matrix, self.dist_coeffs)
-                        # rvecs = res[0][0][0]
-                        # tvecs = res[1][0][0]
-                        #
-                        # self.update_estimate(id[0], corner_pixel_coord, rvecs, tvecs)
+                            marker.calculateExtrinsics(0.07, self.camparam)
+                        detected_marker_data[marker.id]['Rvec'] = marker.Rvec
+                        detected_marker_data[marker.id]['Tvec'] = marker.Tvec
 
-                frame = self.draw_grid_lines(frame, corners, ids)
-                frame = self.draw_robot_pos(frame, corners, ids)
+                    if marker.id > 0:  # draw markers onto the image
+                        marker.draw(color_image, np.array([255, 255, 255]), 2, True)
+                        aruco.CvDrawingUtils.draw3dAxis(color_image, self.camparam, marker.Rvec, marker.Tvec, .1)
+
+                # store data
+                for id, data in detected_marker_data.items():
+                    self.update_estimate(id, data['center'], data['Rvec'], data['Tvec'])
+
+                # draw grid
+                color_image = self.draw_grid_lines(color_image, detected_marker_data)
+                color_image = self.draw_robot_pos(color_image, detected_marker_data)
 
                 # Show images
-                cv2.imshow('RoboRally', frame)
+                cv2.imshow('RoboRally', color_image)
                 key = cv2.waitKey(1)
 
                 if key > 0:
                     self.event_queue.put(('key', key))
-                    print('key = ', key)
                     if key == ord('q'):
                         running = False
         finally:
@@ -147,6 +170,7 @@ class ArucoEstimator:
                 # Stop streaming
                 self.pipeline.stop()
 
+
     def update_estimate(self, id, pixel_coord, rvec, tvec):
         # update the marker estimate with new data
         if id in self.corner_marker_ids.values():
@@ -156,8 +180,9 @@ class ArucoEstimator:
             old_estimate = self.corner_estimates[corner]['real_world_estimate']
             n_estimates = self.corner_estimates[corner]['n_estimates']
 
-            tvec_proj = tvec[0:2]  # projection to get rid of z coordinate
-            tvec_proj = np.array((tvec_proj[0], -tvec_proj[1]))  # flip y coordinate
+            x = tvec[0][0]
+            y = -tvec[1][0]  # flip y coordinate
+            tvec_proj = np.array([x, y])
             if old_estimate is not None:
                 new_estimate = (n_estimates * old_estimate + tvec_proj) / (n_estimates + 1)  # weighted update
             else:
@@ -169,8 +194,8 @@ class ArucoEstimator:
         elif id in self.robot_marker_ids:
             # for robot markers we extract x and y position as well as the angle
             # here we could also implement a filter
-            x = tvec[0]
-            y = -tvec[1]  # flip y coordinate
+            x = tvec[0][0]
+            y = -tvec[1][0]  # flip y coordinate
 
             # compute angle
             rot_mat, _ = cv2.Rodrigues(rvec)
@@ -266,16 +291,12 @@ class ArucoEstimator:
                             thickness=2)
         return frame
 
-    def draw_grid_lines(self, frame, corners, ids):
+    def draw_grid_lines(self, frame, detected_marker_data):
         # draws a grid onto the given frame
         board_corners_pixel_coords = {}
         for corner, id in self.corner_marker_ids.items():
-            try:
-                ind, _ = np.where(ids == id)  # find index
-                ind = ind[0]
-                board_corners_pixel_coords[corner] = tuple(np.mean(corners[ind][0], axis=0))
-            except IndexError:
-                pass
+            if id in detected_marker_data.keys():
+                board_corners_pixel_coords[corner] = tuple(detected_marker_data[id]['center'])
 
         frame = self.draw_corner_line(frame, 'a', 'b', board_corners_pixel_coords)
         frame = self.draw_corner_line(frame, 'b', 'c', board_corners_pixel_coords)
@@ -318,16 +339,12 @@ class ArucoEstimator:
             print(f"error: invalid robot id {id}")
             return None
 
-    def draw_robot_pos(self, frame, corners, ids):
+    def draw_robot_pos(self, frame, detected_marker_data):
        # draws information about the robot positions onto the given frame
        robot_corners_pixel_coords = {}
        for id, estimate in self.robot_marker_estimates.items():
-            try:
-                ind, _ = np.where(ids == id)  # find index
-                ind = ind[0]
-                robot_corners_pixel_coords[id] = tuple(np.mean(corners[ind][0], axis=0))
-            except IndexError:
-                pass
+            if id in detected_marker_data.keys():
+                robot_corners_pixel_coords[id] = tuple(detected_marker_data[id]['center'])
 
        for id, coord in robot_corners_pixel_coords.items():
            x = self.robot_marker_estimates[id][0]
@@ -339,5 +356,4 @@ class ArucoEstimator:
 
 if __name__ == "__main__":
     estimator = ArucoEstimator(use_realsense=False)
-    estimator.run_tracking()
 
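
Below is a minimal usage sketch of the detection path this patch switches to (aruco.MarkerDetector, aruco.CameraParameters, Marker.calculateExtrinsics). It only uses calls that already appear in the diff; the standalone-script framing, the webcam at index 0, and reusing the dfk72_6mm_param2.yml calibration file are assumptions, and the 0.1 m marker size mirrors the corner-marker size used in run_tracking.

    import os

    import cv2
    import numpy as np
    import aruco  # same bindings the patch imports

    # load the camera calibration required for pose estimation
    # (assumption: the calibration file sits next to this script)
    camparam = aruco.CameraParameters()
    camparam.readFromXMLFile(os.path.join(os.path.dirname(__file__), "dfk72_6mm_param2.yml"))

    detector = aruco.MarkerDetector()
    cap = cv2.VideoCapture(0)  # assumption: plain webcam, as in the non-realsense branch

    while True:
        ok, frame = cap.read()
        if not ok:
            break
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        for marker in detector.detect(gray):
            marker.calculateExtrinsics(0.1, camparam)  # 0.1 m marker size
            print(marker.id, marker.getCenter(), marker.Tvec)
            marker.draw(frame, np.array([255, 255, 255]), 2, True)
        cv2.imshow('markers', frame)
        if cv2.waitKey(1) == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()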