enable auto-exposure by default, some cleanup

parent f9f4a2c1c6
commit 2607e94958
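Before this change the color sensor's auto exposure was switched off right after the pipeline started and could only be toggled at runtime with the X key. The commit replaces that with an "Autoexposure" checkbox in the parameter tree (default True), wired to a new set_autoexposure() helper, so the setting is applied at startup and can be changed from the GUI. A minimal sketch of the underlying pyrealsense2 calls, not part of this commit; it assumes a connected RealSense camera whose color sensor is query_sensors()[1], as in this file:

    import pyrealsense2 as rs

    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    pipeline.start(config)

    # index 1 is the RGB sensor on D4xx devices (index 0 is the stereo module)
    color_sensor = pipeline.get_active_profile().get_device().query_sensors()[1]

    def set_autoexposure(enabled):
        # the option takes a numeric flag; True/False are accepted as 1.0/0.0
        color_sensor.set_option(rs.option.enable_auto_exposure, enabled)
        print("auto exposure:", color_sensor.get_option(rs.option.enable_auto_exposure))

    set_autoexposure(True)  # the default after this commit
    pipeline.stop()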
@@ -5,7 +5,7 @@ import os
 import time
 import math
 from pyqtgraph.Qt import QtCore, QtGui
-from pyqtgraph.parametertree import Parameter, ParameterTree
+from pyqtgraph.parametertree import Parameter, ParameterTree, parameterTypes
 import pyqtgraph as pg
 
 
@@ -28,8 +28,40 @@ class FPSCounter:
         self.fps_start_time = time.time()
         return self.fps_counter / delta_t
 
+## test add/remove
+## this group includes a menu allowing the user to add new parameters into its child list
+class RobotMarkerGroup(parameterTypes.GroupParameter):
+    def __init__(self, **opts):
+        opts['type'] = 'group'
+        opts['addText'] = "Add Marker"
+        opts['addList'] = ['Robot']
+        parameterTypes.GroupParameter.__init__(self, **opts)
+
+    def addNew(self, typ):
+        current_robots = self.children()
+        current_indices = [int(r.name().split(' ')[1]) for r in current_robots]
+        new_index = len(current_indices) + 1
+        val = 0
+        self.addChild(dict(name=f"Robot {new_index}", type='int', value=val, removable=False,
+                           renamable=True))
+
+class CornerMarkerGroup(parameterTypes.GroupParameter):
+    def __init__(self, **opts):
+        opts['type'] = 'group'
+        opts['addText'] = "Add Marker"
+        opts['addList'] = ['Corner']
+        parameterTypes.GroupParameter.__init__(self, **opts)
+
+    def addNew(self, typ):
+        current_corners = self.children()
+        current_chars = [str(r.name().split(' ')[1]) for r in current_corners]
+        new_char = chr(ord(current_chars[-1]) + 1)
+        val = 0
+        self.addChild(dict(name=f"Corner {new_char}", type='int', value=val, removable=False))
+
+
 class ArucoEstimator:
-    def __init__(self, robot_marker_ids=None, use_realsense=True, grid_columns=12, grid_rows=12):
+    def __init__(self, robot_marker_ids=None, use_realsense=True, grid_columns=8, grid_rows=8):
         self.app = QtGui.QApplication([])
 
         ## Create window with GraphicsView widget
@@ -59,7 +91,7 @@ class ArucoEstimator:
         self.fps_overlay = pg.TextItem('fps = 0', color=(255, 255, 0), anchor=(0,1))
         self.plotwidget.addItem(self.fps_overlay)
 
-        self.invert_grayscale = False
+        self.invert_grayscale = True
         self.draw_grid = False
         self.draw_markers = True
         self.draw_marker_coordinate_system = False
@@ -76,6 +108,8 @@ class ArucoEstimator:
             'c': {'pixel_coordinate': None, 'x': None, 'y': None, 'n_estimates': 0},
             'd': {'pixel_coordinate': None, 'x': None, 'y': None, 'n_estimates': 0},
         }
+        self.grid_columns = grid_columns
+        self.grid_rows = grid_rows
 
         self.robot_marker_size = 0.07
         if robot_marker_ids is None:
@@ -84,6 +118,12 @@ class ArucoEstimator:
         self.robot_marker_estimates = dict([(marker_id, {'t': None, 'x': None, 'y': None, 'angle': None})
                                             for marker_id in self.robot_marker_ids])
 
+        robot_marker_group = [{'name': f'Robot {ind}', 'type': 'int', 'value': marker_id} for ind, marker_id in
+                              enumerate(self.robot_marker_ids, 1)]
+        corner_marker_group = [{'name': f'Corner {letter}', 'type': 'int', 'value': marker_id} for letter, marker_id in
+                               self.corner_marker_ids.items()]
+
+        self.threshold = 10.0
 
         # parameter
         params_spec = [
@@ -93,27 +133,35 @@ class ArucoEstimator:
              'suffix': 'm', 'limits': (0.01, 1), 'step': 0.001},
            {'name': 'Draw markers', 'type': 'bool', 'value': self.draw_markers},
            {'name': 'Draw marker coordinate system', 'type': 'bool', 'value': self.draw_marker_coordinate_system},
+           {'name': 'Threshold', 'type': 'float', 'value': self.threshold},
            {'name': 'Invert grayscale', 'type': 'bool', 'value': self.invert_grayscale, 'tip': "Invert grayscale image before marker detection"},
            {'name': 'Show FPS', 'type': 'bool', 'value': True, 'tip': "Display frames per second counter"},
            {'name': 'Draw grid', 'type': 'bool', 'value': self.draw_grid, 'tip': "Draw grid spanned by the markers 0 - 3"},
-           {'name': 'Display mode', 'type': 'list', 'values': ['color', 'grayscale'], 'value': 'color', 'tip': "Display mode for the video"}
+           {'name': 'Grid columns', 'type': 'int', 'value': self.grid_columns, 'tip': "Number of columns for the grid"},
+           {'name': 'Grid rows', 'type': 'int', 'value': self.grid_rows, 'tip': "Number of rows for the grid"},
+           {'name': 'Display mode', 'type': 'list', 'values': ['color', 'grayscale'], 'value': 'color', 'tip': "Display mode for the video"},
+           {'name': 'Autoexposure', 'type': 'bool', 'value': True},
+           RobotMarkerGroup(name="Robot markers", children=robot_marker_group),
+           CornerMarkerGroup(name="Corner markers", children=corner_marker_group),
        ]
         self.params = Parameter.create(name='params', type='group', children=params_spec)
         self.params.param('Invert grayscale').sigValueChanged.connect(lambda _, v: self.__setattr__('invert_grayscale', v))
+        self.params.param('Threshold').sigValueChanged.connect(lambda _, v: self.__setattr__('threshold', v))
         self.params.param('Draw markers').sigValueChanged.connect(lambda _, v: self.__setattr__('draw_markers', v))
         self.params.param('Draw marker coordinate system').sigValueChanged.connect(lambda _, v: self.__setattr__('draw_marker_coordinate_system', v))
         self.params.param('Draw grid').sigValueChanged.connect(lambda _, v: self.__setattr__('draw_grid', v))
+        self.params.param('Grid columns').sigValueChanged.connect(lambda _, v: self.__setattr__('grid_columns', v))
+        self.params.param('Grid rows').sigValueChanged.connect(lambda _, v: self.__setattr__('grid_rows', v))
         self.params.param('Corner marker size').sigValueChanged.connect(lambda _, v: self.__setattr__('corner_marker_size', v))
         self.params.param('Robot marker size').sigValueChanged.connect(lambda _, v: self.__setattr__('robot_marker_size', v))
         self.params.param('Show FPS').sigValueChanged.connect(lambda _, v: self.fps_overlay.show() if v else self.fps_overlay.hide())
+        self.params.param('Autoexposure').sigValueChanged.connect(lambda _, v: self.set_autoexposure(v))
+
         self.paramtree = ParameterTree()
         self.paramtree.setParameters(self.params, showTop=False)
         self.layout.addWidget(self.paramtree)
         self.win.show() ## show widget alone in its own window
 
-        self.grid_columns = grid_columns
-        self.grid_rows = grid_rows
-
         self.event_queue = Queue()
 
         if use_realsense: # check if realsense camera is connected
@@ -126,9 +174,8 @@ class ArucoEstimator:
             # Start streaming
             self.pipeline.start(config)
 
-            # disable auto exposure
-            color_sensor = self.pipeline.get_active_profile().get_device().query_sensors()[1]
-            color_sensor.set_option(rs.option.enable_auto_exposure, False)
+            # enable auto exposure
+            self.set_autoexposure(self.params['Autoexposure'])
 
             camera_intrinsics = self.pipeline.get_active_profile().get_stream(
                 rs.stream.color).as_video_stream_profile().get_intrinsics()
@@ -145,16 +192,13 @@ class ArucoEstimator:
             self.pipeline = None
 
         # create detector and get parameters
-        self.detector = aruco.MarkerDetector()
-        self.detector.setDetectionMode(aruco.DM_VIDEO_FAST, 0.05)
 
-        self.detector_params = self.detector.getParameters()
-
         # print detector parameters
-        print("detector params:")
-        for val in dir(self.detector_params):
-            if not val.startswith("__"):
-                print("\t{} : {}".format(val, self.detector_params.__getattribute__(val)))
+        # print("detector params:")
+        # print(self.detector_params)
+        # for val in dir(self.detector_params):
+        #     if not val.startswith("__"):
+        #         print("\t{} : {}".format(val, self.detector_params.__getattribute__(val)))
 
         self.camparam = aruco.CameraParameters()
         if use_realsense:
@@ -166,6 +210,12 @@ class ArucoEstimator:
         self.drag_line_start = None
         self.previous_click = None
 
+    def set_autoexposure(self, v):
+        if self.pipeline is not None:
+            color_sensor = self.pipeline.get_active_profile().get_device().query_sensors()[1]
+            color_sensor.set_option(rs.option.enable_auto_exposure, v)
+            print(color_sensor.get_option(rs.option.enable_auto_exposure))
+
     def compute_clicked_position(self, px, py):
         if self.all_corners_detected():
             # inter/extrapolate from clicked point to marker position
@@ -242,8 +292,20 @@ class ArucoEstimator:
         if self.invert_grayscale:
             cv2.bitwise_not(gray, gray)
 
+        detector = aruco.MarkerDetector()
+        detector.setDetectionMode(aruco.DM_VIDEO_FAST, 0.01)
+
+        #detector_params = detector.getParameters()
+
         # run aruco marker detection
-        detected_markers = self.detector.detect(gray)
+        detected_markers = detector.detect(gray)
+        # detected_markers2 = detector.detect(gray)
+        #gray = detector.getThresholdedImage()
+
+        display_mode = self.params.param('Display mode').value()
+
+        #print(f"detected_markers = {[marker.id for marker in detected_markers]}")
+        #print("threshold = ", self.threshold)
 
         # extract data for detected markers
         detected_marker_data = {}
@@ -282,7 +344,6 @@ class ArucoEstimator:
 
         # Show images
         color_image_rgb = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB) # convert to RGB
-        display_mode = self.params.param('Display mode').value()
         if display_mode == 'color':
             self.img.setImage(np.transpose(np.flipud(color_image_rgb), axes=(1, 0, 2)))
         elif display_mode == 'grayscale':
@@ -298,15 +359,6 @@ class ArucoEstimator:
             # Stop streaming
             self.pipeline.stop()
             self.app.quit()
-        elif key == QtCore.Qt.Key_X:
-            if self.pipeline is not None:
-                color_sensor = self.pipeline.get_active_profile().get_device().query_sensors()[1]
-                if color_sensor.get_option(rs.option.enable_auto_exposure) == 1.0:
-                    color_sensor.set_option(rs.option.enable_auto_exposure, False)
-                    print("auto exposure OFF")
-                else:
-                    color_sensor.set_option(rs.option.enable_auto_exposure, True)
-                    print("auto exposure ON")
         elif key == QtCore.Qt.Key_I:
             self.invert_grayscale = not self.invert_grayscale
 
@@ -432,7 +484,7 @@ class ArucoEstimator:
         corner_1_center = self.corner_estimates[corner_1]['pixel_coordinate']
         corner_2_center = self.corner_estimates[corner_2]['pixel_coordinate']
         if corner_1_center is not None and corner_2_center is not None:
-            frame = cv2.line(frame, tuple(corner_1_center), tuple(corner_2_center), color=(0, 0, 255), thickness=2)
+            frame = cv2.line(frame, tuple(corner_1_center.astype(int)), tuple(corner_2_center.astype(int)), color=(0, 0, 255), thickness=2)
         return frame
 
     def draw_grid_lines(self, frame, detected_marker_data):
@@ -455,7 +507,7 @@ class ArucoEstimator:
         for x in range(1, self.grid_columns):
             column_line_top = a + x / self.grid_columns * vab
             column_line_bottom = d + x / self.grid_columns * vdc
-            frame = cv2.line(frame, tuple(column_line_top), tuple(column_line_bottom), color=(0, 255, 0),
+            frame = cv2.line(frame, tuple(column_line_top.astype(int)), tuple(column_line_bottom.astype(int)), color=(0, 255, 0),
                              thickness=1)
 
         # draw horizontal lines
@@ -464,7 +516,7 @@ class ArucoEstimator:
         for y in range(1, self.grid_rows):
             row_line_top = a + y / self.grid_rows * vad
             row_line_bottom = b + y / self.grid_rows * vbc
-            frame = cv2.line(frame, tuple(row_line_top), tuple(row_line_bottom), color=(0, 255, 0),
+            frame = cv2.line(frame, tuple(row_line_top.astype(int)), tuple(row_line_bottom.astype(int)), color=(0, 255, 0),
                              thickness=1)
 
         return frame
@@ -500,7 +552,7 @@ class ArucoEstimator:
 
 
 if __name__ == "__main__":
-    estimator = ArucoEstimator(use_realsense=False, robot_marker_ids=[11, 12, 13, 14])
+    estimator = ArucoEstimator(use_realsense=True, robot_marker_ids=[11, 12, 13, 14])
     estimator.process_frame()
 
     import sys
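Note on the marker groups added above: RobotMarkerGroup and CornerMarkerGroup follow pyqtgraph's extendable-group pattern, where setting addText (and optionally addList) on a GroupParameter makes the parameter tree show an "Add Marker" control, and each activation calls the group's addNew(). A minimal, self-contained sketch of that pattern with a hypothetical MarkerGroup class, not part of the commit; it assumes the same older pyqtgraph/Qt API used in this file (QtGui.QApplication, exec_()):

    from pyqtgraph.Qt import QtGui
    from pyqtgraph.parametertree import ParameterTree, parameterTypes

    class MarkerGroup(parameterTypes.GroupParameter):
        def __init__(self, **opts):
            opts['type'] = 'group'
            opts['addText'] = "Add Marker"  # renders an "Add Marker" control on the group
            parameterTypes.GroupParameter.__init__(self, **opts)

        def addNew(self, typ=None):
            # called by the control; append an integer child named after its index
            index = len(self.children()) + 1
            self.addChild(dict(name=f"Marker {index}", type='int', value=0))

    app = QtGui.QApplication([])
    group = MarkerGroup(name="Robot markers", children=[
        {'name': 'Robot 1', 'type': 'int', 'value': 11},
    ])
    group.sigChildAdded.connect(lambda parent, child, index: print("added", child.name()))

    tree = ParameterTree()
    tree.setParameters(group, showTop=True)
    tree.show()
    app.exec_()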