general refactoring

Simon Pirkelmann 2020-10-24 21:08:20 +02:00
parent dd162018d8
commit 7ded3bee79


@@ -9,6 +9,7 @@ from queue import Queue
 import aruco
 class ArucoEstimator:
     corner_marker_ids = {
         'a': 0,
@@ -17,33 +18,33 @@ class ArucoEstimator:
         'd': 3
     }
-    angles = []
     corner_estimates = {
-        'a': {'pixel_coordinate': None, 'x': None, 'y': None, 'n_estimates': 0 },
-        'b': {'pixel_coordinate': None, 'x': None, 'y': None, 'n_estimates': 0 },
-        'c': {'pixel_coordinate': None, 'x': None, 'y': None, 'n_estimates': 0 },
-        'd': {'pixel_coordinate': None, 'x': None, 'y': None, 'n_estimates': 0 },
+        'a': {'pixel_coordinate': None, 'x': None, 'y': None, 'n_estimates': 0},
+        'b': {'pixel_coordinate': None, 'x': None, 'y': None, 'n_estimates': 0},
+        'c': {'pixel_coordinate': None, 'x': None, 'y': None, 'n_estimates': 0},
+        'd': {'pixel_coordinate': None, 'x': None, 'y': None, 'n_estimates': 0},
     }
 
-    def __init__(self, robot_marker_ids=None, use_realsense=True, grid_columns=8, grid_rows=8, draw_marker_coordinate_system=False):
+    def __init__(self, robot_marker_ids=None, use_realsense=True, grid_columns=8, grid_rows=8,
+                 draw_marker_coordinate_system=False):
         self.grid_columns = grid_columns
         self.grid_rows = grid_rows
         if robot_marker_ids is None:
             robot_marker_ids = []
         self.robot_marker_ids = robot_marker_ids
-        self.robot_marker_estimates = dict([(id, None) for id in self.robot_marker_ids])
+        self.robot_marker_estimates = dict([(marker_id, {'t': None, 'x': None, 'y': None, 'angle': None})
+                                            for marker_id in self.robot_marker_ids])
         self.event_queue = Queue()
         self.draw_marker_coordinate_system = draw_marker_coordinate_system
         if use_realsense:  # check if realsense camera is connected
             # Configure depth and color streams
             self.pipeline = rs.pipeline()
             config = rs.config()
             # config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
-            #config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)
+            # config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 30)
             config.enable_stream(rs.stream.color, 1280, 720, rs.format.bgr8, 30)
             # config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
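Note on the constructor change in the hunk above: robot_marker_estimates now starts with a full record per marker instead of None, and the 't' field doubles as a "has this marker been seen yet?" flag (compare the updated all_robots_detected and get_robot_state_estimate further down). A minimal standalone sketch of that idea, with made-up marker ids:

    robot_marker_ids = [11, 12]  # hypothetical ids, not from the commit
    robot_marker_estimates = {marker_id: {'t': None, 'x': None, 'y': None, 'angle': None}
                              for marker_id in robot_marker_ids}

    def all_robots_detected():
        # a marker counts as detected once its record has a timestamp
        return not any(estimate['t'] is None for estimate in robot_marker_estimates.values())

    print(all_robots_detected())  # False until every marker has received at least one update
    robot_marker_estimates[11]['t'] = 0.0
    robot_marker_estimates[12].update({'t': 0.1, 'x': 1.0, 'y': 2.0, 'angle': 0.5})
    print(all_robots_detected())  # True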
@@ -66,11 +67,11 @@ class ArucoEstimator:
         # create detector and get parameters
         self.detector = aruco.MarkerDetector()
-        #self.detector.setDictionary('ARUCO_MIP_36h12')
-        #self.detector.setDictionary('ARUCO_MIP_16h3')
-        #self.detector.setDictionary('ARUCO')
-        #self.detector.setDetectionMode(aruco.DM_NORMAL, 0.05)
-        #self.detector.setDetectionMode(aruco.DM_VIDEO_FAST, 0.05)
+        # self.detector.setDictionary('ARUCO_MIP_36h12')
+        # self.detector.setDictionary('ARUCO_MIP_16h3')
+        # self.detector.setDictionary('ARUCO')
+        # self.detector.setDetectionMode(aruco.DM_NORMAL, 0.05)
+        # self.detector.setDetectionMode(aruco.DM_VIDEO_FAST, 0.05)
 
         self.detector_params = self.detector.getParameters()
@@ -158,15 +159,15 @@ class ArucoEstimator:
                 detected_marker_data[marker.id]['Rvec'] = marker.Rvec
                 detected_marker_data[marker.id]['Tvec'] = marker.Tvec
                 if marker.id > 0:  # draw markers onto the image
                     marker.draw(color_image, np.array([255, 255, 255]), 2, True)
                     if self.draw_marker_coordinate_system:
                         aruco.CvDrawingUtils.draw3dAxis(color_image, self.camparam, marker.Rvec, marker.Tvec, .1)
 
             # store data
-            for id, data in detected_marker_data.items():
-                self.update_estimate(id, data['marker_center'], data['Rvec'], data['Tvec'], t_image)
+            for marker_id, data in detected_marker_data.items():
+                self.update_estimate(marker_id, data['marker_center'], data['Rvec'], data['Tvec'], t_image)
 
             # draw grid
             color_image = self.draw_grid_lines(color_image, detected_marker_data)
@@ -186,14 +187,14 @@ class ArucoEstimator:
             # Stop streaming
             self.pipeline.stop()
 
-    def update_estimate(self, id, pixel_coord_center, rvec, tvec, t_image):
+    def update_estimate(self, marker_id, pixel_coord_center, rvec, tvec, t_image):
         # update the marker estimate with new data
-        if id in self.corner_marker_ids.values():
+        if marker_id in self.corner_marker_ids.values():
             # for corner markers we compute the mean of all measurements s.t. the position stabilizes over time
             # (we assume the markers do not move)
 
             # get corresponding corner to the detected marker
-            corner = next(filter(lambda key: self.corner_marker_ids[key] == id, self.corner_marker_ids.keys()))
+            corner = next(filter(lambda key: self.corner_marker_ids[key] == marker_id, self.corner_marker_ids.keys()))
             old_estimate_x = self.corner_estimates[corner]['x']
             old_estimate_y = self.corner_estimates[corner]['y']
             n_estimates = self.corner_estimates[corner]['n_estimates']
@@ -204,7 +205,7 @@ class ArucoEstimator:
                 new_estimate_x = (n_estimates * old_estimate_x + x) / (n_estimates + 1)  # weighted update
                 new_estimate_y = (n_estimates * old_estimate_y + y) / (n_estimates + 1)
             else:
                 new_estimate_x = x  # first estimate
                 new_estimate_y = y
             self.corner_estimates[corner]['t'] = t_image
             self.corner_estimates[corner]['x'] = new_estimate_x
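Side note on the corner update above: it is an incremental arithmetic mean, so keeping only the current estimate and n_estimates is enough to average every measurement seen so far. A quick standalone check of the recurrence with made-up samples:

    # running mean:  new = (n * old + sample) / (n + 1)
    samples = [1.0, 2.0, 4.0, 5.0]
    estimate, n = None, 0
    for s in samples:
        estimate = s if n == 0 else (n * estimate + s) / (n + 1)
        n += 1
    assert abs(estimate - sum(samples) / len(samples)) < 1e-12
    print(estimate)  # 3.0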
@@ -212,7 +213,7 @@ class ArucoEstimator:
             self.corner_estimates[corner]['n_estimates'] = n_estimates + 1
             self.corner_estimates[corner]['pixel_coordinate'] = pixel_coord_center
-        elif id in self.robot_marker_ids:
+        elif marker_id in self.robot_marker_ids:
             # for robot markers we extract x and y position as well as the angle
             # here we could also implement a filter
             x = tvec[0][0]
@@ -224,8 +225,7 @@ class ArucoEstimator:
             _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat)
             angle = -euler_angles[2][0] * np.pi / 180.0
-            self.angles.append(angle)
-            self.robot_marker_estimates[id] = {'t': t_image, 'x': x, 'y': y, 'angle': angle}
+            self.robot_marker_estimates[marker_id] = {'t': t_image, 'x': x, 'y': y, 'angle': angle}
 
     def all_corners_detected(self):
         # checks if all corner markers have been detected at least once
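The hunk above only shows the tail end of the angle computation; how pose_mat is assembled is outside the diff. A plausible self-contained sketch of the whole yaw extraction — the Rodrigues/hconcat part is an assumption, only the decomposeProjectionMatrix call and the degree-to-radian conversion with flipped sign appear in the commit:

    import cv2
    import numpy as np

    # hypothetical marker pose: rotation vector (Rodrigues form) and translation, both 3x1
    rvec = np.array([[0.0], [0.0], [0.3]])
    tvec = np.array([[0.5], [0.2], [1.0]])

    rot_mat, _ = cv2.Rodrigues(rvec)          # 3x3 rotation matrix (assumed construction of pose_mat)
    pose_mat = cv2.hconcat((rot_mat, tvec))   # 3x4 [R | t]
    _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat)

    angle = -euler_angles[2][0] * np.pi / 180.0  # yaw about z, degrees -> radians, sign flipped as in the diff
    x, y = tvec[0][0], tvec[1][0]
    print(x, y, angle)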
@@ -233,7 +233,7 @@ class ArucoEstimator:
 
     def all_robots_detected(self):
         # checks if all robot markers have been detected at least once
-        return not any([estimate is None for estimate in self.robot_marker_estimates.values()])
+        return not any([estimate['t'] is None for estimate in self.robot_marker_estimates.values()])
 
     def get_pos_from_grid_point(self, x, y, orientation=None):
         """
@@ -244,8 +244,8 @@ class ArucoEstimator:
        :return: numpy array with corresponding real world x- and y-position
                 if orientation was specified the array also contains the matching angle for the orientation
        """
-        assert x >= 0 and x < self.grid_columns
-        assert y >= 0 and y < self.grid_rows
+        assert 0 <= x < self.grid_columns
+        assert 0 <= y < self.grid_rows
         assert self.all_corners_detected()
 
         # compute column line
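The assert rewrite is purely cosmetic: Python's chained comparison evaluates exactly like the two-clause version it replaces. A tiny illustration (the grid size is arbitrary):

    grid_columns = 8
    for x in (-1, 0, 7, 8):
        assert (0 <= x < grid_columns) == (x >= 0 and x < grid_columns)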
@@ -329,11 +329,11 @@ class ArucoEstimator:
         # draw vertical lines
         vab = b - a
         vdc = c - d
-        for x in range(1,self.grid_columns):
+        for x in range(1, self.grid_columns):
             column_line_top = a + x / self.grid_columns * vab
             column_line_bottom = d + x / self.grid_columns * vdc
             frame = cv2.line(frame, tuple(column_line_top), tuple(column_line_bottom), color=(0, 255, 0),
                              thickness=1)
 
         # draw horizontal lines
         vad = d - a
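For reference, the vertical grid lines in the hunk above are plain linear interpolation between the corner estimates: the x-th line runs from the point x/grid_columns of the way along the top edge (a toward b) to the same fraction along the bottom edge (d toward c). A short sketch with made-up pixel coordinates for the corners:

    import numpy as np

    # hypothetical corner pixel coordinates: a (top-left), b (top-right), c (bottom-right), d (bottom-left)
    a, b, c, d = (np.array(p, dtype=float) for p in [(0, 0), (800, 0), (800, 600), (0, 600)])
    grid_columns = 8
    x = 3
    column_line_top = a + x / grid_columns * (b - a)     # -> [300.   0.]
    column_line_bottom = d + x / grid_columns * (c - d)  # -> [300. 600.]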
@@ -342,35 +342,36 @@ class ArucoEstimator:
             row_line_top = a + y / self.grid_rows * vad
             row_line_bottom = b + y / self.grid_rows * vbc
             frame = cv2.line(frame, tuple(row_line_top), tuple(row_line_bottom), color=(0, 255, 0),
                              thickness=1)
 
         return frame
 
-    def get_robot_state_estimate(self, id):
-        if id in self.robot_marker_estimates:
-            if self.robot_marker_estimates[id] is not None:
-                return np.array([self.robot_marker_estimates[id]['x'], self.robot_marker_estimates[id]['y'],
-                                 self.robot_marker_estimates[id]['angle']])
+    def get_robot_state_estimate(self, marker_id):
+        if marker_id in self.robot_marker_estimates:
+            if self.robot_marker_estimates[marker_id]['t'] is not None:
+                return np.array([self.robot_marker_estimates[marker_id]['x'],
+                                 self.robot_marker_estimates[marker_id]['y'],
+                                 self.robot_marker_estimates[marker_id]['angle']])
             else:
-                print(f"error: no estimate available for robot {id}")
+                print(f"error: no estimate available for robot {marker_id}")
                 return None
         else:
-            print(f"error: invalid robot id {id}")
+            print(f"error: invalid robot id {marker_id}")
             return None
 
     def draw_robot_pos(self, frame, detected_marker_data):
         # draws information about the robot positions onto the given frame
         robot_corners_pixel_coords = {}
-        for id, estimate in self.robot_marker_estimates.items():
-            if id in detected_marker_data.keys():
-                robot_corners_pixel_coords[id] = tuple(detected_marker_data[id]['center'])
+        for marker_id, estimate in self.robot_marker_estimates.items():
+            if marker_id in detected_marker_data.keys():
+                robot_corners_pixel_coords[marker_id] = tuple(detected_marker_data[marker_id]['center'])
 
-        for id, coord in robot_corners_pixel_coords.items():
-            x = self.robot_marker_estimates[id]['x']
-            y = self.robot_marker_estimates[id]['y']
-            angle = self.robot_marker_estimates[id]['angle']
+        for marker_id, coord in robot_corners_pixel_coords.items():
+            x = self.robot_marker_estimates[marker_id]['x']
+            y = self.robot_marker_estimates[marker_id]['y']
+            angle = self.robot_marker_estimates[marker_id]['angle']
             frame = cv2.putText(frame, "pos = ({:5.3f}, {:5.3f}), ang = {:5.3f}".format(x, y, angle), coord,
-                                cv2.FONT_HERSHEY_SIMPLEX, 0.50, (0,255,0))
+                                cv2.FONT_HERSHEY_SIMPLEX, 0.50, (0, 255, 0))
         return frame
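Usage note: after this refactoring, get_robot_state_estimate returns a 3-element numpy array (x, y, angle) once the marker has been seen, and None (plus a printed error) otherwise. A hedged caller sketch — the constructor arguments and marker id 11 are assumptions, not part of the commit:

    estimator = ArucoEstimator(robot_marker_ids=[11], use_realsense=False)

    state = estimator.get_robot_state_estimate(11)
    if state is not None:
        x, y, angle = state  # world position and heading in radians
        print(f"robot 11 at ({x:.3f}, {y:.3f}), heading {angle:.3f} rad")
    else:
        print("no estimate yet for robot 11")  # the marker has not been detected so far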