added options to run detection on inverted markers and to disable auto-exposure

Simon Pirkelmann 2020-11-08 16:29:48 +01:00
parent 119d857531
commit 993d4c0141


@@ -9,7 +9,6 @@ from queue import Queue
import aruco
class ArucoEstimator:
corner_marker_ids = {
'a': 0,
@@ -25,8 +24,7 @@ class ArucoEstimator:
'd': {'pixel_coordinate': None, 'x': None, 'y': None, 'n_estimates': 0},
}
def __init__(self, robot_marker_ids=None, use_realsense=True, grid_columns=8, grid_rows=8,
draw_marker_coordinate_system=False):
def __init__(self, robot_marker_ids=None, use_realsense=True, grid_columns=8, grid_rows=8):
self.grid_columns = grid_columns
self.grid_rows = grid_rows
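Note on the API change: the draw_marker_coordinate_system flag is removed from __init__ and, together with the new draw_markers and invert_grayscale options, becomes a per-call argument of run_tracking (see the later hunks). A hedged before/after sketch of the call sites, based only on the signatures visible in this diff:

# before this commit: overlay behaviour fixed at construction time
estimator = ArucoEstimator(use_realsense=True, draw_marker_coordinate_system=True)
estimator.run_tracking()

# after this commit: drawing and inversion chosen per tracking run
estimator = ArucoEstimator(use_realsense=True, robot_marker_ids=[11, 12, 13, 14])
estimator.run_tracking(draw_markers=True, draw_marker_coordinate_system=True, invert_grayscale=False)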
@@ -36,21 +34,24 @@ class ArucoEstimator:
self.robot_marker_estimates = dict([(marker_id, {'t': None, 'x': None, 'y': None, 'angle': None})
for marker_id in self.robot_marker_ids])
self.draw_grid = False
self.event_queue = Queue()
self.draw_marker_coordinate_system = draw_marker_coordinate_system
if use_realsense: # check if realsense camera is connected
# Configure depth and color streams
self.pipeline = rs.pipeline()
config = rs.config()
# config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
#config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)
config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
# config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
# Start streaming
self.pipeline.start(config)
# disable auto exposure
color_sensor = self.pipeline.get_active_profile().get_device().query_sensors()[1]
color_sensor.set_option(rs.option.enable_auto_exposure, False)
camera_intrinsics = self.pipeline.get_active_profile().get_stream(
rs.stream.color).as_video_stream_profile().get_intrinsics()
self.camera_matrix = np.zeros((3, 3))
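The three added lines above switch auto-exposure off right after the pipeline starts, so marker contrast stays constant between frames. A minimal standalone sketch of the same pyrealsense2 calls, assuming a D4xx-style device where the color sensor is the second entry returned by query_sensors():

import pyrealsense2 as rs

pipeline = rs.pipeline()
config = rs.config()
config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
pipeline.start(config)

# index 0 is usually the stereo/depth module, index 1 the RGB sensor
color_sensor = pipeline.get_active_profile().get_device().query_sensors()[1]
color_sensor.set_option(rs.option.enable_auto_exposure, False)
# with auto-exposure disabled, a fixed exposure value could be set explicitly:
# color_sensor.set_option(rs.option.exposure, 200)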
@@ -71,7 +72,7 @@ class ArucoEstimator:
#self.detector.setDictionary('ARUCO_MIP_16h3')
# self.detector.setDictionary('ARUCO')
#self.detector.setDetectionMode(aruco.DM_NORMAL, 0.05)
# self.detector.setDetectionMode(aruco.DM_VIDEO_FAST, 0.05)
self.detector.setDetectionMode(aruco.DM_VIDEO_FAST, 0.05)
self.detector_params = self.detector.getParameters()
@@ -82,7 +83,11 @@ class ArucoEstimator:
print("\t{} : {}".format(val, self.detector_params.__getattribute__(val)))
self.camparam = aruco.CameraParameters()
if use_realsense:
self.camparam.readFromXMLFile(os.path.join(os.path.dirname(__file__), "realsense.yml"))
else:
self.camparam.readFromXMLFile(os.path.join(os.path.dirname(__file__), "dfk72_6mm_param2.yml"))
print(self.camparam)
def mouse_callback(self, event, px, py, flags, param):
"""
@@ -120,7 +125,7 @@ class ArucoEstimator:
else:
print("not all markers have been detected!")
def run_tracking(self):
def run_tracking(self, draw_markers=True, draw_marker_coordinate_system=False, invert_grayscale=False):
"""
Run the marker tracking in a loop
"""
@@ -143,6 +148,8 @@ class ArucoEstimator:
t_image = time.time()
gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
if invert_grayscale:
cv2.bitwise_not(gray, gray)
# run aruco marker detection
detected_markers = self.detector.detect(gray)
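The invert_grayscale option targets boards printed with swapped colors (light markers on a dark background): cv2.bitwise_not computes 255 minus each pixel in place, so the detector again sees dark squares on a light background. A short sketch of the pre-processing path, with color_image and detector standing in for the attributes used above:

import cv2

gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
if invert_grayscale:
    cv2.bitwise_not(gray, gray)  # in-place inversion, no extra buffer
detected_markers = detector.detect(gray)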
@@ -151,7 +158,7 @@ class ArucoEstimator:
detected_marker_data = {}
for marker in detected_markers:
detected_marker_data[marker.id] = {'marker_center': marker.getCenter()}
if marker.id > 0:
if marker.id >= 0:
if marker.id in self.corner_marker_ids.values():
marker.calculateExtrinsics(0.1, self.camparam)
else:
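The relaxed check (>= 0 instead of > 0) matters because corner 'a' is assigned marker id 0 in corner_marker_ids above, so the old strict comparison silently skipped that corner. A tiny illustration of the effect:

marker_id = 0                 # id of corner 'a'
assert not (marker_id > 0)    # old filter: corner 'a' never processed
assert marker_id >= 0         # new filter: corner 'a' included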
@@ -159,10 +166,11 @@ class ArucoEstimator:
detected_marker_data[marker.id]['Rvec'] = marker.Rvec
detected_marker_data[marker.id]['Tvec'] = marker.Tvec
if marker.id > 0: # draw markers onto the image
if marker.id >= 0: # draw markers onto the image
if draw_markers:
marker.draw(color_image, np.array([255, 255, 255]), 2, True)
if self.draw_marker_coordinate_system:
if draw_marker_coordinate_system:
aruco.CvDrawingUtils.draw3dAxis(color_image, self.camparam, marker.Rvec, marker.Tvec, .1)
# store data
@@ -170,6 +178,7 @@ class ArucoEstimator:
self.update_estimate(marker_id, data['marker_center'], data['Rvec'], data['Tvec'], t_image)
# draw grid
if self.draw_grid:
color_image = self.draw_grid_lines(color_image, detected_marker_data)
color_image = self.draw_robot_pos(color_image, detected_marker_data)
@@ -179,8 +188,18 @@ class ArucoEstimator:
if key > 0:
self.event_queue.put(('key', key))
if key == ord('g'):
self.draw_grid = not self.draw_grid
if key == ord('q'):
running = False
if key == ord('a'):
color_sensor = self.pipeline.get_active_profile().get_device().query_sensors()[1]
if color_sensor.get_option(rs.option.enable_auto_exposure) == 1.0:
color_sensor.set_option(rs.option.enable_auto_exposure, False)
print("auto exposure OFF")
else:
color_sensor.set_option(rs.option.enable_auto_exposure, True)
print("auto exposure ON")
finally:
cv2.destroyAllWindows()
if self.pipeline is not None:
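Pressing 'a' now toggles the color sensor's auto-exposure at runtime. The same query-then-set pattern, wrapped as a small illustrative helper (toggle_auto_exposure is not part of this commit; get_option returns the option value as a float, so 1.0 means enabled):

import pyrealsense2 as rs

def toggle_auto_exposure(pipeline):
    color_sensor = pipeline.get_active_profile().get_device().query_sensors()[1]
    enabled = color_sensor.get_option(rs.option.enable_auto_exposure) == 1.0
    color_sensor.set_option(rs.option.enable_auto_exposure, not enabled)
    print("auto exposure", "OFF" if enabled else "ON")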
@@ -309,7 +328,7 @@ class ArucoEstimator:
corner_1_center = self.corner_estimates[corner_1]['pixel_coordinate']
corner_2_center = self.corner_estimates[corner_2]['pixel_coordinate']
if corner_1_center is not None and corner_2_center is not None:
frame = cv2.line(frame, corner_1_center, corner_2_center, color=(0, 0, 255), thickness=2)
frame = cv2.line(frame, tuple(corner_1_center), tuple(corner_2_center), color=(0, 0, 255), thickness=2)
return frame
def draw_grid_lines(self, frame, detected_marker_data):
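Wrapping the corner centers in tuple() avoids an OpenCV drawing error: cv2.line expects its point arguments as plain integer tuples, and newer OpenCV builds reject NumPy arrays (or NumPy integer elements) with an error along the lines of "Can't parse 'pt1'". A minimal sketch of the conversion, assuming the pixel coordinates are stored as NumPy arrays:

import cv2
import numpy as np

frame = np.zeros((720, 1280, 3), dtype=np.uint8)
corner_1_center = np.array([100, 100])
corner_2_center = np.array([600, 400])
# convert to int tuples before handing the points to cv2.line
p1 = tuple(int(v) for v in corner_1_center)
p2 = tuple(int(v) for v in corner_2_center)
frame = cv2.line(frame, p1, p2, color=(0, 0, 255), thickness=2)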
@@ -364,17 +383,18 @@ class ArucoEstimator:
robot_corners_pixel_coords = {}
for marker_id, estimate in self.robot_marker_estimates.items():
if marker_id in detected_marker_data.keys():
robot_corners_pixel_coords[marker_id] = tuple(detected_marker_data[marker_id]['center'])
robot_corners_pixel_coords[marker_id] = tuple(detected_marker_data[marker_id]['marker_center'])
for marker_id, coord in robot_corners_pixel_coords.items():
x = self.robot_marker_estimates[marker_id]['x']
y = self.robot_marker_estimates[marker_id]['y']
angle = self.robot_marker_estimates[marker_id]['angle']
frame = cv2.putText(frame, "pos = ({:5.3f}, {:5.3f}), ang = {:5.3f}".format(x, y, angle), coord,
cv2.FONT_HERSHEY_SIMPLEX, 0.50, (0, 255, 0))
#frame = cv2.putText(frame, "pos = ({:5.3f}, {:5.3f}), ang = {:5.3f}".format(x, y, angle), coord,
# cv2.FONT_HERSHEY_SIMPLEX, 0.50, (0, 255, 0))
frame = cv2.putText(frame, f"{marker_id}", coord, cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 255), thickness=4)
return frame
if __name__ == "__main__":
estimator = ArucoEstimator(use_realsense=False, draw_marker_coordinate_system=True)
estimator.run_tracking()
estimator = ArucoEstimator(use_realsense=True, robot_marker_ids=[11, 12, 13, 14])
estimator.run_tracking(draw_markers=True, invert_grayscale=True)
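For comparison, a hedged example of running the tracker without a RealSense camera (falling back to the dfk72 calibration file loaded above) and with regular, non-inverted markers; all parameter names are taken from this diff:

# alternative invocation: USB camera with the dfk72 calibration, regular markers
estimator = ArucoEstimator(use_realsense=False, robot_marker_ids=[11, 12, 13, 14])
estimator.run_tracking(draw_markers=True, draw_marker_coordinate_system=True, invert_grayscale=False)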