forked from Telos4/RoboRally

added options to run detection on inverted markers and to disable auto-exposure

parent 119d857531, commit 993d4c0141
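For illustration (not part of the commit): the new behaviour is exposed through keyword arguments of run_tracking() plus runtime key bindings. A minimal usage sketch, assuming a connected RealSense camera and the robot marker ids used in the __main__ block below:

    estimator = ArucoEstimator(use_realsense=True, robot_marker_ids=[11, 12, 13, 14])
    # invert_grayscale=True lets the detector find white-on-black ("inverted") markers;
    # auto exposure is disabled at start-up and can be toggled with the 'a' key,
    # grid drawing with the 'g' key, and 'q' ends the tracking loop.
    estimator.run_tracking(draw_markers=True, invert_grayscale=True)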
@@ -9,7 +9,6 @@ from queue import Queue
 
 import aruco
 
-
 class ArucoEstimator:
     corner_marker_ids = {
         'a': 0,
@@ -25,8 +24,7 @@ class ArucoEstimator:
         'd': {'pixel_coordinate': None, 'x': None, 'y': None, 'n_estimates': 0},
     }
 
-    def __init__(self, robot_marker_ids=None, use_realsense=True, grid_columns=8, grid_rows=8,
-                 draw_marker_coordinate_system=False):
+    def __init__(self, robot_marker_ids=None, use_realsense=True, grid_columns=8, grid_rows=8):
         self.grid_columns = grid_columns
         self.grid_rows = grid_rows
 
@@ -36,21 +34,24 @@ class ArucoEstimator:
         self.robot_marker_estimates = dict([(marker_id, {'t': None, 'x': None, 'y': None, 'angle': None})
                                             for marker_id in self.robot_marker_ids])
 
+        self.draw_grid = False
+
         self.event_queue = Queue()
-        self.draw_marker_coordinate_system = draw_marker_coordinate_system
 
         if use_realsense:  # check if realsense camera is connected
            # Configure depth and color streams
            self.pipeline = rs.pipeline()
            config = rs.config()
-            # config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
-            # config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)
+            #config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)
            config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
-            # config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
 
            # Start streaming
            self.pipeline.start(config)
 
+            # disable auto exposure
+            color_sensor = self.pipeline.get_active_profile().get_device().query_sensors()[1]
+            color_sensor.set_option(rs.option.enable_auto_exposure, False)
+
            camera_intrinsics = self.pipeline.get_active_profile().get_stream(
                rs.stream.color).as_video_stream_profile().get_intrinsics()
            self.camera_matrix = np.zeros((3, 3))
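For context (not part of the commit): a minimal standalone sketch of the pyrealsense2 auto-exposure control added above, under the same assumption the commit makes that the color sensor is the second sensor reported by the device.

    import pyrealsense2 as rs

    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
    pipeline.start(config)

    # assumption (as in the commit): sensor index 1 is the RGB sensor, index 0 the depth module
    color_sensor = pipeline.get_active_profile().get_device().query_sensors()[1]
    color_sensor.set_option(rs.option.enable_auto_exposure, False)  # fix the exposure
    # ... grab and process frames here ...
    color_sensor.set_option(rs.option.enable_auto_exposure, True)   # back to auto exposure
    pipeline.stop()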
@@ -68,10 +69,10 @@ class ArucoEstimator:
        # create detector and get parameters
        self.detector = aruco.MarkerDetector()
        # self.detector.setDictionary('ARUCO_MIP_36h12')
-        # self.detector.setDictionary('ARUCO_MIP_16h3')
+        #self.detector.setDictionary('ARUCO_MIP_16h3')
        # self.detector.setDictionary('ARUCO')
-        # self.detector.setDetectionMode(aruco.DM_NORMAL, 0.05)
-        # self.detector.setDetectionMode(aruco.DM_VIDEO_FAST, 0.05)
+        #self.detector.setDetectionMode(aruco.DM_NORMAL, 0.05)
+        self.detector.setDetectionMode(aruco.DM_VIDEO_FAST, 0.05)
 
        self.detector_params = self.detector.getParameters()
 
@@ -82,7 +83,11 @@ class ArucoEstimator:
            print("\t{} : {}".format(val, self.detector_params.__getattribute__(val)))
 
        self.camparam = aruco.CameraParameters()
-        self.camparam.readFromXMLFile(os.path.join(os.path.dirname(__file__), "dfk72_6mm_param2.yml"))
+        if use_realsense:
+            self.camparam.readFromXMLFile(os.path.join(os.path.dirname(__file__), "realsense.yml"))
+        else:
+            self.camparam.readFromXMLFile(os.path.join(os.path.dirname(__file__), "dfk72_6mm_param2.yml"))
+        print(self.camparam)
 
    def mouse_callback(self, event, px, py, flags, param):
        """
@@ -120,7 +125,7 @@ class ArucoEstimator:
        else:
            print("not all markers have been detected!")
 
-    def run_tracking(self):
+    def run_tracking(self, draw_markers=True, draw_marker_coordinate_system=False, invert_grayscale=False):
        """
        Run the marker tracking in a loop
        """
@@ -143,6 +148,8 @@ class ArucoEstimator:
                t_image = time.time()
 
                gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
+                if invert_grayscale:
+                    cv2.bitwise_not(gray, gray)
 
                # run aruco marker detection
                detected_markers = self.detector.detect(gray)
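For illustration (not part of the commit): inverting the grayscale image turns white-on-black ("inverted") marker prints into the black-on-white form the detector expects, which is what the new invert_grayscale flag enables. A minimal sketch using the same calls as the file, with a hypothetical image file name:

    import aruco
    import cv2

    color_image = cv2.imread("inverted_markers.png")   # hypothetical test image
    gray = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
    cv2.bitwise_not(gray, gray)                        # invert in place

    detector = aruco.MarkerDetector()
    detector.setDetectionMode(aruco.DM_VIDEO_FAST, 0.05)
    for marker in detector.detect(gray):
        print(marker.id, marker.getCenter())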
@@ -151,7 +158,7 @@ class ArucoEstimator:
                detected_marker_data = {}
                for marker in detected_markers:
                    detected_marker_data[marker.id] = {'marker_center': marker.getCenter()}
-                    if marker.id > 0:
+                    if marker.id >= 0:
                        if marker.id in self.corner_marker_ids.values():
                            marker.calculateExtrinsics(0.1, self.camparam)
                        else:
@@ -159,10 +166,11 @@ class ArucoEstimator:
                        detected_marker_data[marker.id]['Rvec'] = marker.Rvec
                        detected_marker_data[marker.id]['Tvec'] = marker.Tvec
 
-                    if marker.id > 0:  # draw markers onto the image
-                        marker.draw(color_image, np.array([255, 255, 255]), 2, True)
+                    if marker.id >= 0:  # draw markers onto the image
+                        if draw_markers:
+                            marker.draw(color_image, np.array([255, 255, 255]), 2, True)
 
-                        if self.draw_marker_coordinate_system:
+                        if draw_marker_coordinate_system:
                            aruco.CvDrawingUtils.draw3dAxis(color_image, self.camparam, marker.Rvec, marker.Tvec, .1)
 
                # store data
@@ -170,7 +178,8 @@ class ArucoEstimator:
                    self.update_estimate(marker_id, data['marker_center'], data['Rvec'], data['Tvec'], t_image)
 
                # draw grid
-                color_image = self.draw_grid_lines(color_image, detected_marker_data)
+                if self.draw_grid:
+                    color_image = self.draw_grid_lines(color_image, detected_marker_data)
                color_image = self.draw_robot_pos(color_image, detected_marker_data)
 
                # Show images
@@ -179,8 +188,18 @@ class ArucoEstimator:
 
                if key > 0:
                    self.event_queue.put(('key', key))
+                    if key == ord('g'):
+                        self.draw_grid = not self.draw_grid
                    if key == ord('q'):
                        running = False
+                    if key == ord('a'):
+                        color_sensor = self.pipeline.get_active_profile().get_device().query_sensors()[1]
+                        if color_sensor.get_option(rs.option.enable_auto_exposure) == 1.0:
+                            color_sensor.set_option(rs.option.enable_auto_exposure, False)
+                            print("auto exposure OFF")
+                        else:
+                            color_sensor.set_option(rs.option.enable_auto_exposure, True)
+                            print("auto exposure ON")
        finally:
            cv2.destroyAllWindows()
            if self.pipeline is not None:
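For illustration (not part of the commit): besides being handled here, every key press is pushed onto self.event_queue, so other code can react to it. A sketch of one way to consume those events, assuming run_tracking is run in a background thread (an assumption; the diff does not show how the estimator is driven):

    import threading

    estimator = ArucoEstimator(use_realsense=True, robot_marker_ids=[11, 12, 13, 14])
    threading.Thread(target=estimator.run_tracking,
                     kwargs={'invert_grayscale': True}, daemon=True).start()

    while True:
        event = estimator.event_queue.get()   # blocks until the tracking loop pushes an event
        if event[0] == 'key':
            print("key code:", event[1])
            if event[1] == ord('q'):          # 'q' also ends the tracking loop itself
                break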
@@ -309,7 +328,7 @@ class ArucoEstimator:
            corner_1_center = self.corner_estimates[corner_1]['pixel_coordinate']
            corner_2_center = self.corner_estimates[corner_2]['pixel_coordinate']
            if corner_1_center is not None and corner_2_center is not None:
-                frame = cv2.line(frame, corner_1_center, corner_2_center, color=(0, 0, 255), thickness=2)
+                frame = cv2.line(frame, tuple(corner_1_center), tuple(corner_2_center), color=(0, 0, 255), thickness=2)
        return frame
 
    def draw_grid_lines(self, frame, detected_marker_data):
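For illustration (not part of the commit): cv2.line wants its end points as plain (x, y) tuples; the stored pixel coordinates are presumably numpy arrays (an assumption), which some OpenCV versions refuse to parse as points, hence the tuple(...) conversion above. A minimal sketch:

    import cv2
    import numpy as np

    frame = np.zeros((120, 160, 3), dtype=np.uint8)
    p1 = np.array([10, 10])     # pixel coordinate held as a numpy array
    p2 = np.array([150, 110])

    # converting to tuples gives cv2.line the point format it reliably accepts
    frame = cv2.line(frame, tuple(p1), tuple(p2), color=(0, 0, 255), thickness=2)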
@@ -364,17 +383,18 @@ class ArucoEstimator:
        robot_corners_pixel_coords = {}
        for marker_id, estimate in self.robot_marker_estimates.items():
            if marker_id in detected_marker_data.keys():
-                robot_corners_pixel_coords[marker_id] = tuple(detected_marker_data[marker_id]['center'])
+                robot_corners_pixel_coords[marker_id] = tuple(detected_marker_data[marker_id]['marker_center'])
 
        for marker_id, coord in robot_corners_pixel_coords.items():
            x = self.robot_marker_estimates[marker_id]['x']
            y = self.robot_marker_estimates[marker_id]['y']
            angle = self.robot_marker_estimates[marker_id]['angle']
-            frame = cv2.putText(frame, "pos = ({:5.3f}, {:5.3f}), ang = {:5.3f}".format(x, y, angle), coord,
-                                cv2.FONT_HERSHEY_SIMPLEX, 0.50, (0, 255, 0))
+            #frame = cv2.putText(frame, "pos = ({:5.3f}, {:5.3f}), ang = {:5.3f}".format(x, y, angle), coord,
+            # cv2.FONT_HERSHEY_SIMPLEX, 0.50, (0, 255, 0))
+            frame = cv2.putText(frame, f"{marker_id}", coord, cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 255), thickness=4)
        return frame
 
 
 if __name__ == "__main__":
-    estimator = ArucoEstimator(use_realsense=False, draw_marker_coordinate_system=True)
-    estimator.run_tracking()
+    estimator = ArucoEstimator(use_realsense=True, robot_marker_ids=[11, 12, 13, 14])
+    estimator.run_tracking(draw_markers=True, invert_grayscale=True)