From 6b9373cce5d221f6542ca101813ec41ebe03ba30 Mon Sep 17 00:00:00 2001
From: Simon Pirkelmann
Date: Sat, 14 Nov 2020 15:01:01 +0100
Subject: [PATCH] enabled better mouse interaction

---
 remote_control/aruco_estimator.py | 101 +++++++++++++++++++++---------
 1 file changed, 71 insertions(+), 30 deletions(-)

diff --git a/remote_control/aruco_estimator.py b/remote_control/aruco_estimator.py
index 6a3bc30..275870f 100644
--- a/remote_control/aruco_estimator.py
+++ b/remote_control/aruco_estimator.py
@@ -3,6 +3,7 @@
 import numpy as np
 import cv2
 import os
 import time
+import math
 from shapely.geometry import LineString
 from queue import Queue
@@ -87,7 +88,38 @@ class ArucoEstimator:
             self.camparam.readFromXMLFile(os.path.join(os.path.dirname(__file__), "realsense.yml"))
         else:
             self.camparam.readFromXMLFile(os.path.join(os.path.dirname(__file__), "dfk72_6mm_param2.yml"))
-        print(self.camparam)
+
+        self.drag_line_end = None
+        self.drag_line_start = None
+        self.previous_click = None
+
+    def compute_clicked_position(self, px, py):
+        if self.all_corners_detected():
+            # inter/extrapolate from clicked point to marker position
+            px1 = self.corner_estimates['a']['pixel_coordinate'][0]
+            px3 = self.corner_estimates['c']['pixel_coordinate'][0]
+            py1 = self.corner_estimates['a']['pixel_coordinate'][1]
+            py3 = self.corner_estimates['c']['pixel_coordinate'][1]
+
+            x1 = self.corner_estimates['a']['x']
+            x3 = self.corner_estimates['c']['x']
+            y1 = self.corner_estimates['a']['y']
+            y3 = self.corner_estimates['c']['y']
+
+            alpha = (px - px1) / (px3 - px1)
+            beta = (py - py1) / (py3 - py1)
+
+            print(f"alpha = {alpha}, beta = {beta}")
+
+            target_x = x1 + alpha * (x3 - x1)
+            target_y = y1 + beta * (y3 - y1)
+            target = np.array([target_x, target_y])
+
+            #print(f"target = ({target_x},{target_y})")
+        else:
+            print("not all markers have been detected!")
+            target = np.array([px, -py])
+        return target
 
     def mouse_callback(self, event, px, py, flags, param):
         """
@@ -98,32 +130,35 @@ class ArucoEstimator:
         :param px: x-position of event
         :param py: y-position of event
         """
+        target = None
+
         if event == cv2.EVENT_LBUTTONDOWN:
-            if self.all_corners_detected():
-                # inter/extrapolate from clicked point to marker position
-                px1 = self.corner_estimates['a']['pixel_coordinate'][0]
-                px3 = self.corner_estimates['c']['pixel_coordinate'][0]
-                py1 = self.corner_estimates['a']['pixel_coordinate'][1]
-                py3 = self.corner_estimates['c']['pixel_coordinate'][1]
-
-                x1 = self.corner_estimates['a']['x']
-                x3 = self.corner_estimates['c']['x']
-                y1 = self.corner_estimates['a']['y']
-                y3 = self.corner_estimates['c']['y']
-
-                alpha = (px - px1) / (px3 - px1)
-                beta = (py - py1) / (py3 - py1)
-
-                print(f"alpha = {alpha}, beta = {beta}")
-
-                target_x = x1 + alpha * (x3 - x1)
-                target_y = y1 + beta * (y3 - y1)
-                target = np.array([target_x, target_y, 0])
-
-                print(f"target = ({target_x},{target_y})")
-                self.event_queue.put(('click', target))
+            self.drag_line_start = (px, py)
+        elif event == cv2.EVENT_LBUTTONUP:
+            self.drag_line_end = (px, py)
+            target_pos = self.compute_clicked_position(self.drag_line_start[0], self.drag_line_start[1])
+            if self.drag_line_start != self.drag_line_end:
+                # compute target angle for clicked position
+                facing_pos = self.compute_clicked_position(px, py)
+                v = facing_pos - target_pos
+                target_angle = math.atan2(v[1], v[0])
             else:
-                print("not all markers have been detected!")
+                # determine angle from previously clicked pos (= self.drag_line_end)
+                if self.previous_click is not None:
+                    previous_pos = self.compute_clicked_position(self.previous_click[0], self.previous_click[1])
+                    v = target_pos - previous_pos
+                    target_angle = math.atan2(v[1], v[0])
+                else:
+                    target_angle = 0.0
+
+            target = np.array([target_pos[0], target_pos[1], target_angle])
+            print(target)
+            self.previous_click = (px, py)
+            self.event_queue.put(('click', {'x': target[0], 'y': target[1], 'angle': target[2]}))
+            self.drag_line_start = None
+        elif event == cv2.EVENT_MOUSEMOVE:
+            if self.drag_line_start is not None:
+                self.drag_line_end = (px, py)
 
     def run_tracking(self, draw_markers=True, draw_marker_coordinate_system=False, invert_grayscale=False):
         """
@@ -157,16 +192,16 @@ class ArucoEstimator:
                 # extract data for detected markers
                 detected_marker_data = {}
                 for marker in detected_markers:
-                    detected_marker_data[marker.id] = {'marker_center': marker.getCenter()}
                     if marker.id >= 0:
+                        detected_marker_data[marker.id] = {'marker_center': marker.getCenter()}
+
                         if marker.id in self.corner_marker_ids.values():
-                            marker.calculateExtrinsics(0.1, self.camparam)
+                            marker.calculateExtrinsics(0.075, self.camparam)
                         else:
                             marker.calculateExtrinsics(0.07, self.camparam)
                         detected_marker_data[marker.id]['Rvec'] = marker.Rvec
                         detected_marker_data[marker.id]['Tvec'] = marker.Tvec
 
-                    if marker.id >= 0:
                         # draw markers onto the image
                         if draw_markers:
                             marker.draw(color_image, np.array([255, 255, 255]), 2, True)
@@ -182,6 +217,10 @@ class ArucoEstimator:
                 color_image = self.draw_grid_lines(color_image, detected_marker_data)
                 color_image = self.draw_robot_pos(color_image, detected_marker_data)
 
+                # draw drag
+                if self.drag_line_start is not None and self.drag_line_end is not None:
+                    color_image = cv2.line(color_image, self.drag_line_start, self.drag_line_end, color=(0, 0, 255), thickness=2)
+
                 # Show images
                 cv2.imshow('RoboRally', color_image)
                 key = cv2.waitKey(1)
@@ -192,7 +231,7 @@ class ArucoEstimator:
                     self.draw_grid = not self.draw_grid
                 if key == ord('q'):
                     running = False
-                if key == ord('a'):
+                if key == ord('x'):
                     color_sensor = self.pipeline.get_active_profile().get_device().query_sensors()[1]
                     if color_sensor.get_option(rs.option.enable_auto_exposure) == 1.0:
                         color_sensor.set_option(rs.option.enable_auto_exposure, False)
@@ -200,6 +239,8 @@ class ArucoEstimator:
                     else:
                         color_sensor.set_option(rs.option.enable_auto_exposure, True)
                         print("auto exposure ON")
+                if key == ord('i'):
+                    invert_grayscale = not invert_grayscale
         finally:
             cv2.destroyAllWindows()
             if self.pipeline is not None:
@@ -244,7 +285,7 @@ class ArucoEstimator:
         _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(pose_mat)
         angle = -euler_angles[2][0] * np.pi / 180.0
 
-        self.robot_marker_estimates[marker_id] = {'t': t_image, 'x': x, 'y': y, 'angle': angle}
+        self.robot_marker_estimates[marker_id] = {'t': float(t_image), 'x': float(x), 'y': float(y), 'angle': float(angle)}
 
     def all_corners_detected(self):
         # checks if all corner markers have been detected at least once
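
Reviewer note (not part of the patch): compute_clicked_position() maps a pixel coordinate to a world coordinate by linear inter/extrapolation between corner markers 'a' and 'c', and the new click-and-drag gesture derives the target heading with atan2 on the dragged vector (button-down pixel = target position, button-up pixel = point to face). The standalone Python sketch below reproduces that mapping outside the class; the hard-coded corner_estimates values are invented placeholders, not values from the repository.

import math
import numpy as np

# Hypothetical corner estimates standing in for ArucoEstimator.corner_estimates.
corner_estimates = {
    'a': {'pixel_coordinate': (100, 400), 'x': 0.0, 'y': 0.0},
    'c': {'pixel_coordinate': (500, 100), 'x': 1.0, 'y': 1.0},
}

def pixel_to_world(px, py):
    # Same linear inter/extrapolation as compute_clicked_position() in the
    # all-corners-detected case.
    px1, py1 = corner_estimates['a']['pixel_coordinate']
    px3, py3 = corner_estimates['c']['pixel_coordinate']
    x1, y1 = corner_estimates['a']['x'], corner_estimates['a']['y']
    x3, y3 = corner_estimates['c']['x'], corner_estimates['c']['y']
    alpha = (px - px1) / (px3 - px1)
    beta = (py - py1) / (py3 - py1)
    return np.array([x1 + alpha * (x3 - x1), y1 + beta * (y3 - y1)])

# Drag gesture: button-down pixel gives the target position, button-up pixel
# gives the point the robot should face.
target_pos = pixel_to_world(300, 250)
facing_pos = pixel_to_world(450, 150)
v = facing_pos - target_pos
target_angle = math.atan2(v[1], v[0])
print(f"target = ({target_pos[0]:.3f}, {target_pos[1]:.3f}), angle = {math.degrees(target_angle):.1f} deg")

A plain click (zero drag distance) is handled in the patch by falling back to the direction from the previously clicked point, or to an angle of 0.0 if there is none.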