enabled better mouse interaction

master
Simon Pirkelmann 2020-11-14 15:01:01 +01:00
parent eff5474ac2
commit 6b9373cce5
1 changed file with 71 additions and 30 deletions

View File

@ -3,6 +3,7 @@ import numpy as np
import cv2 import cv2
import os import os
import time import time
import math
from shapely.geometry import LineString from shapely.geometry import LineString
from queue import Queue from queue import Queue
@ -87,7 +88,38 @@ class ArucoEstimator:
self.camparam.readFromXMLFile(os.path.join(os.path.dirname(__file__), "realsense.yml")) self.camparam.readFromXMLFile(os.path.join(os.path.dirname(__file__), "realsense.yml"))
else: else:
self.camparam.readFromXMLFile(os.path.join(os.path.dirname(__file__), "dfk72_6mm_param2.yml")) self.camparam.readFromXMLFile(os.path.join(os.path.dirname(__file__), "dfk72_6mm_param2.yml"))
print(self.camparam)
self.drag_line_end = None
self.drag_line_start = None
self.previous_click = None
def compute_clicked_position(self, px, py):
    """Map a clicked pixel coordinate to a world coordinate.

    Linearly inter-/extrapolates between the two diagonally opposite
    corner markers 'a' and 'c' whose pixel and world positions are stored
    in ``self.corner_estimates``.

    :param px: x pixel coordinate of the click
    :param py: y pixel coordinate of the click
    :return: ``np.array([x, y])`` world position; falls back to the raw
             pixel position (y negated to keep axis orientation) when the
             corner markers are unavailable or degenerate.
    """
    if self.all_corners_detected():
        # pixel coordinates of the two diagonal corner markers
        px1 = self.corner_estimates['a']['pixel_coordinate'][0]
        px3 = self.corner_estimates['c']['pixel_coordinate'][0]
        py1 = self.corner_estimates['a']['pixel_coordinate'][1]
        py3 = self.corner_estimates['c']['pixel_coordinate'][1]

        # world coordinates of the same two markers
        x1 = self.corner_estimates['a']['x']
        x3 = self.corner_estimates['c']['x']
        y1 = self.corner_estimates['a']['y']
        y3 = self.corner_estimates['c']['y']

        # guard: if both corners project onto the same pixel row or column
        # the interpolation below would raise ZeroDivisionError
        if px3 == px1 or py3 == py1:
            print("degenerate corner marker configuration, cannot interpolate!")
            return np.array([px, -py])

        # normalized position of the click along the marker diagonal;
        # values outside [0, 1] extrapolate beyond the marked area
        alpha = (px - px1) / (px3 - px1)
        beta = (py - py1) / (py3 - py1)

        target_x = x1 + alpha * (x3 - x1)
        target_y = y1 + beta * (y3 - y1)
        target = np.array([target_x, target_y])
    else:
        print("not all markers have been detected!")
        # fall back to raw pixel coordinates (y negated)
        target = np.array([px, -py])
    return target
def mouse_callback(self, event, px, py, flags, param): def mouse_callback(self, event, px, py, flags, param):
""" """
@ -98,32 +130,35 @@ class ArucoEstimator:
:param px: x-position of event :param px: x-position of event
:param py: y-position of event :param py: y-position of event
""" """
target = None
if event == cv2.EVENT_LBUTTONDOWN: if event == cv2.EVENT_LBUTTONDOWN:
if self.all_corners_detected(): self.drag_line_start = (px, py)
# inter/extrapolate from clicked point to marker position elif event == cv2.EVENT_LBUTTONUP:
px1 = self.corner_estimates['a']['pixel_coordinate'][0] self.drag_line_end = (px, py)
px3 = self.corner_estimates['c']['pixel_coordinate'][0] target_pos = self.compute_clicked_position(self.drag_line_start[0], self.drag_line_start[1])
py1 = self.corner_estimates['a']['pixel_coordinate'][1] if self.drag_line_start != self.drag_line_end:
py3 = self.corner_estimates['c']['pixel_coordinate'][1] # compute target angle for clicked position
facing_pos = self.compute_clicked_position(px, py)
x1 = self.corner_estimates['a']['x'] v = facing_pos - target_pos
x3 = self.corner_estimates['c']['x'] target_angle = math.atan2(v[1], v[0])
y1 = self.corner_estimates['a']['y']
y3 = self.corner_estimates['c']['y']
alpha = (px - px1) / (px3 - px1)
beta = (py - py1) / (py3 - py1)
print(f"alpha = {alpha}, beta = {beta}")
target_x = x1 + alpha * (x3 - x1)
target_y = y1 + beta * (y3 - y1)
target = np.array([target_x, target_y, 0])
print(f"target = ({target_x},{target_y})")
self.event_queue.put(('click', target))
else: else:
print("not all markers have been detected!") # determine angle from previously clicked pos (= self.drag_line_end)
if self.previous_click is not None:
previous_pos = self.compute_clicked_position(self.previous_click[0], self.previous_click[1])
v = target_pos - previous_pos
target_angle = math.atan2(v[1], v[0])
else:
target_angle = 0.0
target = np.array([target_pos[0], target_pos[1], target_angle])
print(target)
self.previous_click = (px, py)
self.event_queue.put(('click', {'x': target[0], 'y': target[1], 'angle': target[2]}))
self.drag_line_start = None
elif event == cv2.EVENT_MOUSEMOVE:
if self.drag_line_start is not None:
self.drag_line_end = (px, py)
def run_tracking(self, draw_markers=True, draw_marker_coordinate_system=False, invert_grayscale=False): def run_tracking(self, draw_markers=True, draw_marker_coordinate_system=False, invert_grayscale=False):
""" """
@ -157,16 +192,16 @@ class ArucoEstimator:
# extract data for detected markers # extract data for detected markers
detected_marker_data = {} detected_marker_data = {}
for marker in detected_markers: for marker in detected_markers:
detected_marker_data[marker.id] = {'marker_center': marker.getCenter()}
if marker.id >= 0: if marker.id >= 0:
detected_marker_data[marker.id] = {'marker_center': marker.getCenter()}
if marker.id in self.corner_marker_ids.values(): if marker.id in self.corner_marker_ids.values():
marker.calculateExtrinsics(0.1, self.camparam) marker.calculateExtrinsics(0.075, self.camparam)
else: else:
marker.calculateExtrinsics(0.07, self.camparam) marker.calculateExtrinsics(0.07, self.camparam)
detected_marker_data[marker.id]['Rvec'] = marker.Rvec detected_marker_data[marker.id]['Rvec'] = marker.Rvec
detected_marker_data[marker.id]['Tvec'] = marker.Tvec detected_marker_data[marker.id]['Tvec'] = marker.Tvec
if marker.id >= 0: # draw markers onto the image
if draw_markers: if draw_markers:
marker.draw(color_image, np.array([255, 255, 255]), 2, True) marker.draw(color_image, np.array([255, 255, 255]), 2, True)
@ -182,6 +217,10 @@ class ArucoEstimator:
color_image = self.draw_grid_lines(color_image, detected_marker_data) color_image = self.draw_grid_lines(color_image, detected_marker_data)
color_image = self.draw_robot_pos(color_image, detected_marker_data) color_image = self.draw_robot_pos(color_image, detected_marker_data)
# draw drag
if self.drag_line_start is not None and self.drag_line_end is not None:
color_image = cv2.line(color_image, self.drag_line_start, self.drag_line_end, color=(0, 0, 255), thickness=2)
# Show images # Show images
cv2.imshow('RoboRally', color_image) cv2.imshow('RoboRally', color_image)
key = cv2.waitKey(1) key = cv2.waitKey(1)
@ -192,7 +231,7 @@ class ArucoEstimator:
self.draw_grid = not self.draw_grid self.draw_grid = not self.draw_grid
if key == ord('q'): if key == ord('q'):
running = False running = False
if key == ord('a'): if key == ord('x'):
color_sensor = self.pipeline.get_active_profile().get_device().query_sensors()[1] color_sensor = self.pipeline.get_active_profile().get_device().query_sensors()[1]
if color_sensor.get_option(rs.option.enable_auto_exposure) == 1.0: if color_sensor.get_option(rs.option.enable_auto_exposure) == 1.0:
color_sensor.set_option(rs.option.enable_auto_exposure, False) color_sensor.set_option(rs.option.enable_auto_exposure, False)
@ -200,6 +239,8 @@ class ArucoEstimator:
else: else:
color_sensor.set_option(rs.option.enable_auto_exposure, True) color_sensor.set_option(rs.option.enable_auto_exposure, True)
print("auto exposure ON") print("auto exposure ON")
if key == ord('i'):
invert_grayscale = not invert_grayscale
finally: finally:
cv2.destroyAllWindows() cv2.destroyAllWindows()
if self.pipeline is not None: if self.pipeline is not None:
@ -244,7 +285,7 @@ class ArucoEstimator:
_, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat) _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat)
angle = -euler_angles[2][0] * np.pi / 180.0 angle = -euler_angles[2][0] * np.pi / 180.0
self.robot_marker_estimates[marker_id] = {'t': t_image, 'x': x, 'y': y, 'angle': angle} self.robot_marker_estimates[marker_id] = {'t': float(t_image), 'x': float(x), 'y': float(y), 'angle': float(angle)}
def all_corners_detected(self): def all_corners_detected(self):
# checks if all corner markers have been detected at least once # checks if all corner markers have been detected at least once