diff --git a/remote_control/pid_controller.py b/remote_control/pid_controller.py
new file mode 100644
index 0000000..7ba4687
--- /dev/null
+++ b/remote_control/pid_controller.py
@@ -0,0 +1,292 @@
+import math
+import time
+
+import numpy as np
+
+
+class PIDController:
+    """Simple PID controller that steers a robot towards a target pose
+    using pose estimates (x, y, angle) provided by the estimator."""
+
+    def __init__(self, estimator):
+        self.t = time.time()
+
+        self.estimator = estimator
+        self.controlling = False
+
+        # PID parameters for the angle controller, determined via Ziegler-Nichols
+        # (measured K_crit = 1.4, T_crit = 1.5)
+        self.P_angle = 0.5
+        self.I_angle = 0.4
+        self.D_angle = 0.1
+
+        # PID parameters for the position controller
+        self.P_pos = 0.5
+        self.I_pos = 0.3
+        self.D_pos = 0.1
+
+        self.mode = None
+
+        # NOTE: move_to_pos() additionally relies on self.ols (an open-loop solver),
+        # self.dt, self.mstep and self.control_scaling, which are not defined in
+        # this class and have to be provided externally.
+
+    def move_to_pos(self, target_pos, robot, near_target_counter=5):
+        """Drive the given robot to target_pos and return once it has been close
+        to the target for near_target_counter consecutive iterations."""
+        near_target = 0
+        while near_target < near_target_counter:
+            # handle events from the opencv window
+            while not self.estimator.event_queue.empty():
+                event = self.estimator.event_queue.get()
+                print("event: ", event)
+                if event[0] == 'click':
+                    pass
+                elif event[0] == 'key':
+                    key = event[1]
+
+                    if key == 84:  # arrow up
+                        self.controlling = True
+                        self.t = time.time()
+                    elif key == 82:  # arrow down
+                        self.controlling = False
+                        robot.send_cmd()
+                    elif key == 48:  # 0
+                        target_pos = np.array([0.0, 0.0, 0.0])
+                    elif key == 43:  # +
+                        self.control_scaling += 0.1
+                        self.control_scaling = min(self.control_scaling, 1.0)
+                        print("control scaling = ", self.control_scaling)
+                    elif key == 45:  # -
+                        self.control_scaling -= 0.1
+                        self.control_scaling = max(self.control_scaling, 0.1)
+                        print("control scaling = ", self.control_scaling)
+                    elif key == 113 or key == 27:  # q or ESCAPE
+                        print("quit!")
+                        self.controlling = False
+                        robot.send_cmd()
+                        return
+
+            x_pred = self.get_measurement(robot.id)
+
+            if x_pred is not None:
+                error_pos = np.linalg.norm(x_pred[0:2] - target_pos[0:2])
+                angles_unwrapped = np.unwrap([x_pred[2], target_pos[2]])  # unwrap angle to avoid jump in data
+                error_ang = np.abs(angles_unwrapped[0] - angles_unwrapped[1])
+                # print("error_pos = {}, error_ang = {}".format(error_pos, error_ang))
+
+                # if error_pos > 0.075 or error_ang > 0.35:
+                if error_pos > 0.05 or error_ang > 0.1:
+                    # solve mpc open loop problem
+                    res = self.ols.solve(x_pred, target_pos)
+
+                    us1 = res[0] * self.control_scaling
+                    us2 = res[1] * self.control_scaling
+
+                    dt_mpc = time.time() - self.t
+                    if dt_mpc < self.dt:  # wait until the next control can be applied
+                        time.sleep(self.dt - dt_mpc)
+                else:
+                    us1 = [0] * self.mstep
+                    us2 = [0] * self.mstep
+                    near_target += 1
+
+                # send controls to the robot
+                for i in range(0, self.mstep):  # option to use multistep mpc if len(range) > 1
+                    u1 = us1[i]
+                    u2 = us2[i]
+                    robot.send_cmd(u1, u2)
+                    if i < self.mstep:
+                        time.sleep(self.dt)
+                self.t = time.time()  # save time the most recent control was applied
+            else:
+                print("robot not detected yet!")
+
+    def interactive_control(self, robots):
+        """Interactive control loop: handles click/key events from the estimator's
+        opencv window and runs the selected mode ('angle', 'position' or 'combined')."""
+        controlled_robot_number = 0
+        robot = robots[controlled_robot_number]
+
+        # time stamps and measured angles, kept e.g. for later plotting/debugging
+        ts = []
+        angles = []
+
+        target_pos = np.array([0.0, 0.0, 0.0])
+        e_angle_old = 0.0
+        e_pos_old = 0.0
+
+        i = 0.0
+        i_angle = 0.0
+        i_pos = 0.0
+
+        t0 = time.time()
+
+        running = True
+        while running:
+            # handle events from opencv window
+            while not self.estimator.event_queue.empty():
+                event = self.estimator.event_queue.get()
+                print("event: ", event)
+                if event[0] == 'click':
+                    # a click sets a new target position and resets the controller state
+                    target_pos = event[1]
+                    i_angle = 0
+                    i_pos = 0
+                    e_pos_old = 0
+                    e_angle_old = 0
+                    self.mode = 'combined'
+                elif event[0] == 'key':
+                    key = event[1]
+
+                    if key == 32:  # space: toggle control
+                        self.controlling = not self.controlling
+                        if not self.controlling:
+                            print("disable control")
+                            robot.send_cmd()  # stop robot
+                            self.mode = None  # reset control mode
+                        else:
+                            print("enable control")
+                            i = 0
+                            self.t = time.time()
+                    elif key == 48:  # 0
+                        target_pos = np.array([0.0, 0.0, 0.0])  # TODO: use center of board for target pos
+                    elif key == 97:  # a
+                        self.mode = 'angle'
+                        e_angle_old = 0
+                        i = 0
+                        self.t = time.time()
+                    elif key == 99:  # c
+                        self.mode = 'combined'
+                        e_angle_old = 0
+                        e_pos_old = 0
+                        i_angle = 0
+                        i_pos = 0
+                        self.t = time.time()
+                    elif key == 112:  # p
+                        self.mode = 'position'
+                        e_pos_old = 0
+                        i = 0
+                        self.t = time.time()
+                    elif key == 43:  # +
+                        self.P_pos += 0.1
+                        print("P = ", self.P_pos)
+                    elif key == 45:  # -
+                        self.P_pos -= 0.1
+                        print("P = ", self.P_pos)
+                    elif key == 9:  # TAB: switch controlled robot
+                        robot.send_cmd()  # stop current robot
+                        controlled_robot_number = (controlled_robot_number + 1) % len(robots)
+                        robot = robots[controlled_robot_number]
+                        print(f"controlled robot: {robot.id}")
+                    elif key == 113 or key == 27:  # q or ESCAPE
+                        print("quit!")
+                        self.controlling = False
+                        robot.send_cmd()
+                        return
+
+            if self.controlling:
+                # measure state
+                x_pred = self.get_measurement(robot.id)
+                dt = time.time() - self.t  # time since the last control was applied
+
+                if self.mode == 'angle':
+                    # the target orientation is given directly by the target pose
+                    target_angle = target_pos[2]
+
+                    ts.append(time.time() - t0)
+                    angles.append(x_pred[2])
+
+                    angles_unwrapped = np.unwrap([x_pred[2], target_angle])  # unwrap angle to avoid jump in data
+
+                    e_angle = angles_unwrapped[0] - angles_unwrapped[1]  # angle difference
+                    p = self.P_angle * e_angle
+                    # i += self.I_angle * dt * e_angle  # right Riemann sum
+                    i += self.I_angle * dt * (e_angle + e_angle_old) / 2.0  # trapezoidal rule
+                    d = self.D_angle * (e_angle - e_angle_old) / dt
+
+                    u1 = p - i - d
+                    u2 = -u1
+
+                    e_angle_old = e_angle
+                elif self.mode == 'position':
+                    # assumes the robot faces (roughly) straight towards or away from the target position
+                    v = target_pos[0:2] - x_pred[0:2]
+                    # check if robot faces in same direction as pos-target-vector
+                    target_angle = math.atan2(v[1], v[0])
+                    current_angle = x_pred[2]
+
+                    angles_unwrapped = np.unwrap([current_angle, target_angle])  # unwrap angle to avoid jump in data
+                    alpha = angles_unwrapped[0]
+                    beta = angles_unwrapped[1]
+
+                    epsilon = 0.3
+
+                    if not (abs(alpha - beta) < epsilon or abs(alpha - beta + np.pi) < epsilon
+                            or abs(alpha - beta - np.pi) < epsilon):
+                        # neither facing towards nor away from the target -> fix the orientation first
+                        print("switch to angle mode")
+                        u1 = u2 = 0.0
+                        robot.send_cmd(u1, u2)
+                        self.mode = 'angle'
+                    elif abs(alpha - beta) < epsilon:
+                        # facing towards the target -> drive forward
+                        e = np.linalg.norm(x_pred[0:2] - target_pos[0:2])  # euclidean distance to target
+                        p = self.P_pos * e
+                        # i += self.I_pos * dt * e  # right Riemann sum
+                        i += self.I_pos * dt * (e + e_pos_old) / 2.0  # trapezoidal rule
+                        d = self.D_pos * (e - e_pos_old) / dt
+
+                        u1 = p - i + d
+                        u2 = u1
+
+                        e_pos_old = e
+                    else:
+                        # facing away from the target -> drive backward
+                        e = np.linalg.norm(x_pred[0:2] - target_pos[0:2])  # euclidean distance to target
+                        p = self.P_pos * e
+                        # i += self.I_pos * dt * e  # right Riemann sum
+                        i += self.I_pos * dt * (e + e_pos_old) / 2.0  # trapezoidal rule
+                        d = self.D_pos * (e - e_pos_old) / dt
+
+                        u1 = -(p - i + d)
+                        u2 = u1
+
+                        e_pos_old = e
+                elif self.mode == 'combined':
+                    # compute angle such that robot faces to target point
+                    v = target_pos[0:2] - x_pred[0:2]
+                    target_angle = math.atan2(v[1], v[0])
+
+                    angles_unwrapped = np.unwrap([x_pred[2], target_angle])  # unwrap angle to avoid jump in data
+
+                    e_angle = angles_unwrapped[0] - angles_unwrapped[1]  # angle difference
+                    e_pos = np.linalg.norm(v)  # distance to the target position
+
+                    p_angle = self.P_angle * e_angle
+                    i_angle += self.I_angle * dt * (e_angle + e_angle_old) / 2.0  # trapezoidal rule
+                    d_angle = self.D_angle * (e_angle - e_angle_old) / dt
+
+                    p_pos = self.P_pos * e_pos
+                    i_pos += self.I_pos * dt * (e_pos + e_pos_old) / 2.0  # trapezoidal rule
+                    d_pos = self.D_pos * (e_pos - e_pos_old) / dt
+
+                    # combine angle and position terms into the two motor commands
+                    # (note: the derivative terms d_angle/d_pos are currently not used)
+                    u1 = p_angle + p_pos - i_angle - i_pos
+                    u2 = -p_angle + p_pos + i_angle - i_pos
+
+                    e_pos_old = e_pos
+                    e_angle_old = e_angle
+
+                    if e_pos < 0.05:
+                        # close to the target position -> only correct the final orientation
+                        self.mode = 'angle'
+                else:
+                    # no control mode selected
+                    u1 = 0.0
+                    u2 = 0.0
+
+                robot.send_cmd(u1, u2)
+                self.t = time.time()  # save time when the most recent control was applied
+            time.sleep(0.1)
+
+    def get_measurement(self, robot_id):
+        return self.estimator.get_robot_state_estimate(robot_id)
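
For trying the controller outside the full setup, a minimal, hypothetical usage sketch follows. DummyEstimator and DummyRobot are stand-ins (not classes from this repository) that only model the interface PIDController relies on: an event_queue of ('click', ...) / ('key', ...) events, get_robot_state_estimate(robot_id) returning a pose (x, y, angle) or None, a send_cmd(u1, u2) method that also stops the robot when called without arguments, and an id attribute. With these stubs no window events are produced, so the loop simply idles.

    # Illustrative stubs only -- replace with the repository's real estimator and robot classes.
    import queue

    import numpy as np

    from remote_control.pid_controller import PIDController


    class DummyEstimator:
        def __init__(self):
            self.event_queue = queue.Queue()  # PIDController polls this for ('click', ...) / ('key', ...) events

        def get_robot_state_estimate(self, robot_id):
            return np.array([0.0, 0.0, 0.0])  # pose (x, y, angle); None would mean "robot not detected yet"


    class DummyRobot:
        def __init__(self, robot_id):
            self.id = robot_id

        def send_cmd(self, u1=0.0, u2=0.0):  # called with no arguments to stop the robot
            print(f"robot {self.id}: u1 = {u1:.3f}, u2 = {u2:.3f}")


    if __name__ == '__main__':
        controller = PIDController(DummyEstimator())
        controller.interactive_control([DummyRobot(1), DummyRobot(2)])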