replaced cv2.aruco with Python bindings for the new ArUco version

Simon Pirkelmann 2020-10-23 20:43:45 +02:00
parent 0bc6b68a20
commit 280fb10427


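For orientation, here is a minimal, self-contained sketch of the detection flow this commit switches to. It uses only calls that appear in the diff below (aruco.MarkerDetector, aruco.CameraParameters, marker.calculateExtrinsics, marker.draw); the webcam index, the single captured frame, and the location of the calibration file are assumptions, not part of the commit.

# Sketch only, not part of the commit: detection with the new aruco Python bindings.
# Assumptions: webcam index 0, one captured frame, dfk72_6mm_param2.yml next to this script.
import os

import cv2
import numpy as np
import aruco

detector = aruco.MarkerDetector()
camparam = aruco.CameraParameters()
camparam.readFromXMLFile(os.path.join(os.path.dirname(__file__), "dfk72_6mm_param2.yml"))

camera = cv2.VideoCapture(0)
ok, color_image = camera.read()  # assume the grab succeeds for this sketch
gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)

for marker in detector.detect(gray):
    # marker edge length in meters; the commit uses 0.1 for corner markers and 0.07 for robot markers
    marker.calculateExtrinsics(0.1, camparam)
    print(marker.id, marker.getCenter(), marker.Tvec)
    marker.draw(color_image, np.array([255, 255, 255]), 2, True)

camera.release()

Unlike cv2.aruco.detectMarkers, which returns separate corners/ids arrays, detector.detect returns marker objects that carry their id, pixel center and, after calculateExtrinsics, their Rvec/Tvec.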
@@ -1,13 +1,16 @@
 import pyrealsense2 as rs
 import numpy as np
 import cv2
-from cv2 import aruco
+import os
 from shapely.geometry import LineString
 from queue import Queue
 
-class ArucoEstimator:
+import aruco
+
+
+class ArucoEstimator(socketserver.BaseRequestHandler):
     corner_marker_ids = {
-        'a': 0,
+        'a': 15,
         'b': 1,
         'c': 2,
         'd': 3
@@ -59,6 +62,25 @@ class ArucoEstimator:
             self.cv_camera = cv2.VideoCapture(0)
             self.pipeline = None
 
+        # create detector and get parameters
+        self.detector = aruco.MarkerDetector()
+        #self.detector.setDictionary('ARUCO_MIP_36h12')
+        #self.detector.setDictionary('ARUCO_MIP_16h3')
+        #self.detector.setDictionary('ARUCO')
+        #self.detector.setDetectionMode(aruco.DM_NORMAL, 0.05)
+        #self.detector.setDetectionMode(aruco.DM_VIDEO_FAST, 0.05)
+
+        self.detector_params = self.detector.getParameters()
+
+        # print detector parameters
+        print("detector params:")
+        for val in dir(self.detector_params):
+            if not val.startswith("__"):
+                print("\t{} : {}".format(val, self.detector_params.__getattribute__(val)))
+
+        self.camparam = aruco.CameraParameters()
+        self.camparam.readFromXMLFile(os.path.join(os.path.dirname(__file__), "dfk72_6mm_param2.yml"))
+
     def mouse_callback(self, event, px, py, flags, param):
         if event == 1:
             if self.all_corners_detected():
@@ -107,38 +129,39 @@ class ArucoEstimator:
                 gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
 
-                #aruco_dict = aruco.Dictionary_get(aruco.DICT_APRILTAG_16H5)
-                #aruco_dict = aruco.Dictionary_get(aruco.DICT_7X7_50)
-                #aruco_dict = aruco.Dictionary_get(aruco.DICT_4X4_50)
-                aruco_dict = aruco.Dictionary_get(aruco.DICT_5X5_1000)
-                #aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL) # fast
-                parameters = aruco.DetectorParameters_create()
-                corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, aruco_dict, parameters=parameters)
-                frame = aruco.drawDetectedMarkers(color_image.copy(), corners, ids)
-
-                if ids is not None:
-                    for id, corner in zip(ids, corners):
-                        if id in self.corner_marker_ids.values():
-                            marker_size = 0.1
-                        else:
-                            marker_size = 0.07
-                        corner_pixel_coord = np.mean(corner[0], axis=0)
-                        # res = aruco.estimatePoseSingleMarkers(corner, marker_size, self.camera_matrix, self.dist_coeffs)
-                        # rvecs = res[0][0][0]
-                        # tvecs = res[1][0][0]
-                        #
-                        # self.update_estimate(id[0], corner_pixel_coord, rvecs, tvecs)
-
-                frame = self.draw_grid_lines(frame, corners, ids)
-                frame = self.draw_robot_pos(frame, corners, ids)
+                # run aruco marker detection
+                detected_markers = self.detector.detect(gray)
+
+                # extract data for detected markers
+                detected_marker_data = {}
+                for marker in detected_markers:
+                    detected_marker_data[marker.id] = {'center': marker.getCenter()}
+                    if marker.id > 0:
+                        if marker.id in self.corner_marker_ids.values():
+                            marker.calculateExtrinsics(0.1, self.camparam)
+                        else:
+                            marker.calculateExtrinsics(0.07, self.camparam)
+                        detected_marker_data[marker.id]['Rvec'] = marker.Rvec
+                        detected_marker_data[marker.id]['Tvec'] = marker.Tvec
+
+                    if marker.id > 0: # draw markers onto the image
+                        marker.draw(color_image, np.array([255, 255, 255]), 2, True)
+                        aruco.CvDrawingUtils.draw3dAxis(color_image, self.camparam, marker.Rvec, marker.Tvec, .1)
+
+                # store data
+                for id, data in detected_marker_data.items():
+                    self.update_estimate(id, data['center'], data['Rvec'], data['Tvec'])
+
+                # draw grid
+                color_image = self.draw_grid_lines(color_image, detected_marker_data)
+                color_image = self.draw_robot_pos(color_image, detected_marker_data)
 
                 # Show images
-                cv2.imshow('RoboRally', frame)
+                cv2.imshow('RoboRally', color_image)
                 key = cv2.waitKey(1)
 
                 if key > 0:
                     self.event_queue.put(('key', key))
+                    print('key = ', key)
                     if key == ord('q'):
                         running = False
         finally:
@@ -147,6 +170,7 @@ class ArucoEstimator:
                 # Stop streaming
                 self.pipeline.stop()
 
+
     def update_estimate(self, id, pixel_coord, rvec, tvec):
         # update the marker estimate with new data
         if id in self.corner_marker_ids.values():
@@ -156,8 +180,9 @@ class ArucoEstimator:
             old_estimate = self.corner_estimates[corner]['real_world_estimate']
             n_estimates = self.corner_estimates[corner]['n_estimates']
 
-            tvec_proj = tvec[0:2] # projection to get rid of z coordinate
-            tvec_proj = np.array((tvec_proj[0], -tvec_proj[1])) # flip y coordinate
+            x = tvec[0][0]
+            y = -tvec[1][0] # flip y coordinate
+            tvec_proj = np.array([x, y])
             if old_estimate is not None:
                 new_estimate = (n_estimates * old_estimate + tvec_proj) / (n_estimates + 1) # weighted update
             else:
@@ -169,8 +194,8 @@ class ArucoEstimator:
         elif id in self.robot_marker_ids:
             # for robot markers we extract x and y position as well as the angle
             # here we could also implement a filter
-            x = tvec[0]
-            y = -tvec[1] # flip y coordinate
+            x = tvec[0][0]
+            y = -tvec[1][0] # flip y coordinate
 
             # compute angle
             rot_mat, _ = cv2.Rodrigues(rvec)
@@ -266,16 +291,12 @@ class ArucoEstimator:
                             thickness=2)
         return frame
 
-    def draw_grid_lines(self, frame, corners, ids):
+    def draw_grid_lines(self, frame, detected_marker_data):
         # draws a grid onto the given frame
         board_corners_pixel_coords = {}
         for corner, id in self.corner_marker_ids.items():
-            try:
-                ind, _ = np.where(ids == id) # find index
-                ind = ind[0]
-                board_corners_pixel_coords[corner] = tuple(np.mean(corners[ind][0], axis=0))
-            except IndexError:
-                pass
+            if id in detected_marker_data.keys():
+                board_corners_pixel_coords[corner] = tuple(detected_marker_data[id]['center'])
 
         frame = self.draw_corner_line(frame, 'a', 'b', board_corners_pixel_coords)
         frame = self.draw_corner_line(frame, 'b', 'c', board_corners_pixel_coords)
@@ -318,16 +339,12 @@ class ArucoEstimator:
             print(f"error: invalid robot id {id}")
             return None
 
-    def draw_robot_pos(self, frame, corners, ids):
+    def draw_robot_pos(self, frame, detected_marker_data):
         # draws information about the robot positions onto the given frame
         robot_corners_pixel_coords = {}
         for id, estimate in self.robot_marker_estimates.items():
-            try:
-                ind, _ = np.where(ids == id) # find index
-                ind = ind[0]
-                robot_corners_pixel_coords[id] = tuple(np.mean(corners[ind][0], axis=0))
-            except IndexError:
-                pass
+            if id in detected_marker_data.keys():
+                robot_corners_pixel_coords[id] = tuple(detected_marker_data[id]['center'])
 
         for id, coord in robot_corners_pixel_coords.items():
             x = self.robot_marker_estimates[id][0]
@@ -339,5 +356,4 @@ class ArucoEstimator:
 
 if __name__ == "__main__":
     estimator = ArucoEstimator(use_realsense=False)
     estimator.run_tracking()
-
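A note on the tvec[0] → tvec[0][0] changes in update_estimate above: the new indexing suggests the bindings return Tvec (and Rvec) as 3x1 column arrays rather than flat 3-vectors. That is an inference from the diff, not something the commit states. A small sketch of the resulting 2D projection under that assumption:

import numpy as np

# assumed shape: Tvec from the new bindings behaves like a (3, 1) column vector
tvec = np.array([[0.25], [0.40], [1.10]])

x = tvec[0][0]
y = -tvec[1][0] # flip y coordinate, as in update_estimate
tvec_proj = np.array([x, y]) # 2D position used for the corner and robot estimates
print(tvec_proj)  # [ 0.25 -0.4 ]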