enabled better mouse interaction

parent eff5474ac2
commit 6b9373cce5
@@ -3,6 +3,7 @@ import numpy as np
 import cv2
 import os
 import time
+import math
 
 from shapely.geometry import LineString
 from queue import Queue
@@ -87,18 +88,12 @@ class ArucoEstimator:
             self.camparam.readFromXMLFile(os.path.join(os.path.dirname(__file__), "realsense.yml"))
         else:
             self.camparam.readFromXMLFile(os.path.join(os.path.dirname(__file__), "dfk72_6mm_param2.yml"))
-        print(self.camparam)
+        self.drag_line_end = None
+        self.drag_line_start = None
+        self.previous_click = None
+
 
-    def mouse_callback(self, event, px, py, flags, param):
-        """
-        This function is called for each mouse event inside the opencv window.
-        If there are reference markers available we compute the real world position corresponding to the clicked
-        position and put it in an event queue.
-        :param event: type of event (mouse movement, left click, right click, etc.)
-        :param px: x-position of event
-        :param py: y-position of event
-        """
-        if event == cv2.EVENT_LBUTTONDOWN:
+    def compute_clicked_position(self, px, py):
         if self.all_corners_detected():
             # inter/extrapolate from clicked point to marker position
             px1 = self.corner_estimates['a']['pixel_coordinate'][0]
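Note: the click-to-world mapping that used to live inline in mouse_callback is now factored out as compute_clicked_position. A minimal standalone sketch of the underlying inter/extrapolation, assuming corners 'a' and 'c' are diagonally opposite and that corner_estimates stores pixel and world coordinates per corner (the helper name and the dict fields beyond 'pixel_coordinate' are assumptions, not from this commit):

import numpy as np

def pixel_to_world(px, py, corner_estimates):
    # pixel positions of two diagonally opposite corner markers (assumed 'a' and 'c')
    px1, py1 = corner_estimates['a']['pixel_coordinate']
    px3, py3 = corner_estimates['c']['pixel_coordinate']
    # corresponding real-world positions (field names are assumed)
    x1, y1 = corner_estimates['a']['x'], corner_estimates['a']['y']
    x3, y3 = corner_estimates['c']['x'], corner_estimates['c']['y']

    # fractional position of the click along each image axis; values outside
    # [0, 1] extrapolate beyond the marker rectangle
    alpha = (px - px1) / (px3 - px1)
    beta = (py - py1) / (py3 - py1)

    # linear inter/extrapolation, matching target_x = x1 + alpha * (x3 - x1)
    return np.array([x1 + alpha * (x3 - x1), y1 + beta * (y3 - y1)])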
@@ -118,12 +113,52 @@
 
             target_x = x1 + alpha * (x3 - x1)
             target_y = y1 + beta * (y3 - y1)
-            target = np.array([target_x, target_y, 0])
+            target = np.array([target_x, target_y])
 
-            print(f"target = ({target_x},{target_y})")
-            self.event_queue.put(('click', target))
+            #print(f"target = ({target_x},{target_y})")
         else:
             print("not all markers have been detected!")
+            target = np.array([px, -py])
+        return target
 
+    def mouse_callback(self, event, px, py, flags, param):
+        """
+        This function is called for each mouse event inside the opencv window.
+        If there are reference markers available we compute the real world position corresponding to the clicked
+        position and put it in an event queue.
+        :param event: type of event (mouse movement, left click, right click, etc.)
+        :param px: x-position of event
+        :param py: y-position of event
+        """
+        target = None
+
+        if event == cv2.EVENT_LBUTTONDOWN:
+            self.drag_line_start = (px, py)
+        elif event == cv2.EVENT_LBUTTONUP:
+            self.drag_line_end = (px, py)
+            target_pos = self.compute_clicked_position(self.drag_line_start[0], self.drag_line_start[1])
+            if self.drag_line_start != self.drag_line_end:
+                # compute target angle for clicked position
+                facing_pos = self.compute_clicked_position(px, py)
+                v = facing_pos - target_pos
+                target_angle = math.atan2(v[1], v[0])
+            else:
+                # determine angle from previously clicked pos (= self.drag_line_end)
+                if self.previous_click is not None:
+                    previous_pos = self.compute_clicked_position(self.previous_click[0], self.previous_click[1])
+                    v = target_pos - previous_pos
+                    target_angle = math.atan2(v[1], v[0])
+                else:
+                    target_angle = 0.0
+
+            target = np.array([target_pos[0], target_pos[1], target_angle])
+            print(target)
+            self.previous_click = (px, py)
+            self.event_queue.put(('click', {'x': target[0], 'y': target[1], 'angle': target[2]}))
+            self.drag_line_start = None
+        elif event == cv2.EVENT_MOUSEMOVE:
+            if self.drag_line_start is not None:
+                self.drag_line_end = (px, py)
+
     def run_tracking(self, draw_markers=True, draw_marker_coordinate_system=False, invert_grayscale=False):
         """
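mouse_callback now distinguishes a plain click (heading inferred from the previous click) from a click-and-drag (heading from the drag vector) and publishes a pose-like dict. A consumer of the queue might look like this sketch; only the ('click', {'x', 'y', 'angle'}) payload format comes from the commit, the loop itself is illustrative:

def process_events(estimator):
    # illustrative consumer; estimator.event_queue is the Queue filled by mouse_callback
    while True:
        event_type, data = estimator.event_queue.get()  # blocks until an event arrives
        if event_type == 'click':
            print(f"target: ({data['x']:.2f}, {data['y']:.2f}), heading {data['angle']:.2f} rad")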
@@ -157,16 +192,16 @@
                 # extract data for detected markers
                 detected_marker_data = {}
                 for marker in detected_markers:
-                    detected_marker_data[marker.id] = {'marker_center': marker.getCenter()}
                     if marker.id >= 0:
+                        detected_marker_data[marker.id] = {'marker_center': marker.getCenter()}
+
                         if marker.id in self.corner_marker_ids.values():
-                            marker.calculateExtrinsics(0.1, self.camparam)
+                            marker.calculateExtrinsics(0.075, self.camparam)
                         else:
                             marker.calculateExtrinsics(0.07, self.camparam)
                         detected_marker_data[marker.id]['Rvec'] = marker.Rvec
                         detected_marker_data[marker.id]['Tvec'] = marker.Tvec
 
-                    if marker.id >= 0:  # draw markers onto the image
                         if draw_markers:
                             marker.draw(color_image, np.array([255, 255, 255]), 2, True)
 
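The corner markers' assumed side length passed to calculateExtrinsics shrinks from 0.1 to 0.075 (presumably meters, matching the 0.07 used for the robot markers). Distance estimated from a planar marker of known size scales linearly with that size parameter, so the old value would have placed the corner markers about a third too far away:

# back-of-the-envelope: pose translation scales linearly with the assumed
# marker side length, so configuring a 0.075 m marker as 0.1 m inflates
# its estimated distance by the same ratio
scale_error = 0.1 / 0.075
print(f"distance overestimated by factor {scale_error:.2f}")  # ~1.33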
@@ -182,6 +217,10 @@
                 color_image = self.draw_grid_lines(color_image, detected_marker_data)
                 color_image = self.draw_robot_pos(color_image, detected_marker_data)
 
+                # draw drag
+                if self.drag_line_start is not None and self.drag_line_end is not None:
+                    color_image = cv2.line(color_image, self.drag_line_start, self.drag_line_end, color=(0, 0, 255), thickness=2)
+
                 # Show images
                 cv2.imshow('RoboRally', color_image)
                 key = cv2.waitKey(1)
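The drag is rendered as a plain red line. Since the gesture encodes a heading, cv2.arrowedLine (standard OpenCV) would also make the direction visible; a possible variant, with tipLength chosen arbitrarily:

# variant of the overlay above that marks the drag direction with an arrowhead;
# tipLength is the arrowhead size as a fraction of the line length
if self.drag_line_start is not None and self.drag_line_end is not None:
    color_image = cv2.arrowedLine(color_image, self.drag_line_start, self.drag_line_end,
                                  color=(0, 0, 255), thickness=2, tipLength=0.2)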
@@ -192,7 +231,7 @@
                     self.draw_grid = not self.draw_grid
                 if key == ord('q'):
                     running = False
-                if key == ord('a'):
+                if key == ord('x'):
                     color_sensor = self.pipeline.get_active_profile().get_device().query_sensors()[1]
                     if color_sensor.get_option(rs.option.enable_auto_exposure) == 1.0:
                         color_sensor.set_option(rs.option.enable_auto_exposure, False)
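Rebinding the auto-exposure toggle from 'a' to 'x' is a one-character change here, but the key handling is a growing chain of if-statements. A dispatch table is one way to keep the bindings in a single place; the handler names below are made up, only the key characters appear in this file:

# hypothetical key dispatch; handler names are illustrative
bindings = {ord('q'): 'quit',
            ord('x'): 'toggle_auto_exposure',
            ord('i'): 'invert_grayscale'}

def handle_key(key, handlers):
    name = bindings.get(key)
    if name is not None:
        handlers[name]()  # handlers maps names to callables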
@@ -200,6 +239,8 @@
                     else:
                         color_sensor.set_option(rs.option.enable_auto_exposure, True)
                         print("auto exposure ON")
+                if key == ord('i'):
+                    invert_grayscale = not invert_grayscale
         finally:
             cv2.destroyAllWindows()
             if self.pipeline is not None:
@@ -244,7 +285,7 @@
         _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat)
         angle = -euler_angles[2][0] * np.pi / 180.0
 
-        self.robot_marker_estimates[marker_id] = {'t': t_image, 'x': x, 'y': y, 'angle': angle}
+        self.robot_marker_estimates[marker_id] = {'t': float(t_image), 'x': float(x), 'y': float(y), 'angle': float(angle)}
 
     def all_corners_detected(self):
         # checks if all corner markers have been detected at least once
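The changed line casts the pose components to plain Python floats, so consumers of robot_marker_estimates receive built-in types instead of numpy scalars (convenient for printing or serialization). For reference, the yaw extraction in the surrounding context lines follows the usual OpenCV pattern; a self-contained sketch with invented marker values:

import cv2
import numpy as np

rvec = np.array([[0.0], [0.0], [0.3]])  # invented Rodrigues rotation vector (3x1)
tvec = np.array([[0.5], [0.2], [1.0]])  # invented translation (3x1)

rot_mat, _ = cv2.Rodrigues(rvec)         # 3x3 rotation matrix
pose_mat = cv2.hconcat([rot_mat, tvec])  # 3x4 [R|t] matrix
# decomposeProjectionMatrix returns the Euler angles (in degrees) as its 7th output
_, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat)
angle = -euler_angles[2][0] * np.pi / 180.0  # yaw in radians, sign flipped
print(float(angle))  # plain float, as now stored in robot_marker_estimates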