diff --git a/remote_control/casadi_opt.py b/remote_control/casadi_opt.py
index dccbb30..c20167b 100644
--- a/remote_control/casadi_opt.py
+++ b/remote_control/casadi_opt.py
@@ -4,13 +4,15 @@ import time
 
 # look at: https://github.com/casadi/casadi/blob/master/docs/examples/python/vdp_indirect_multiple_shooting.py
 class OpenLoopSolver:
-    def __init__(self, N=10, T=2.0):
+    def __init__(self, N=20, T=4.0):
         self.T = T
         self.N = N
 
         self.opti_x0 = None
         self.opti_lam_g0 = None
 
+        self.use_warmstart = True
+
     def setup(self):
         x = SX.sym('x')
         y = SX.sym('y')
@@ -135,6 +137,8 @@ class OpenLoopSolver:
         #plt.show()
         #return
 
+
+    def solve(self, x0, target, obstacles):
         # alternative solution using multiple shooting (way faster!)
         self.opti = Opti()  # Optimization problem
@@ -143,6 +147,8 @@ class OpenLoopSolver:
         self.Q = self.opti.variable(1,self.N+1)  # state trajectory
         self.U = self.opti.variable(2,self.N)    # control trajectory (throttle)
+
+        self.slack = self.opti.variable(1,1)
         #T = self.opti.variable()  # final time
 
         # ---- objective ---------
@@ -158,7 +164,7 @@ class OpenLoopSolver:
 
         #self.opti.set_initial(T, 1)
 
-    def solve(self, x0, target):
+
         tstart = time.time()
 
         x = SX.sym('x')
@@ -205,12 +211,12 @@ class OpenLoopSolver:
             q_next = self.Q[:, k] + dt / 6 * (k1_q + 2 * k2_q + 2 * k3_q + k4_q)
             self.opti.subject_to(self.X[:, k + 1] == x_next)  # close the gaps
             self.opti.subject_to(self.Q[:, k + 1] == q_next)  # close the gaps
-        self.opti.minimize(self.Q[:, self.N])
+        self.opti.minimize(self.Q[:, self.N] + 1.0e5 * self.slack**2)
 
         # ---- path constraints -----------
         # limit = lambda pos: 1-sin(2*pi*pos)/2
         # self.opti.subject_to(speed<=limit(pos))  # track speed limit
-        maxcontrol = 0.950
+        maxcontrol = 0.95
         self.opti.subject_to(self.opti.bounded(-maxcontrol, self.U, maxcontrol))  # control is limited
 
         # ---- boundary conditions --------
@@ -227,10 +233,11 @@ class OpenLoopSolver:
         # self.opti.subject_to(X[2,:]>=-2)  # Time must be positive
 
         # avoid obstacle
-        # r = 0.25
-        # p = (0.5, 0.5)
-        # for k in range(self.N):
-        #     self.opti.subject_to((X[0,k]-p[0])**2 + (X[1,k]-p[1])**2 > r**2)
+        for o in obstacles:
+            p = obstacles[o].pos
+            r = obstacles[o].radius
+            for k in range(1,self.N):
+                self.opti.subject_to((self.X[0,k]-p[0])**2 + (self.X[1,k]-p[1])**2 + self.slack > r**2)
         # pass
 
         posx = self.X[0, :]
         posy = self.X[1, :]
@@ -241,12 +248,15 @@ class OpenLoopSolver:
         self.opti.subject_to(angle[0] == x0[2])  # finish line at position 1
 
         tend = time.time()
-        print("setting up problem took {} seconds".format(tend - tstart))
+        print("setting up problem took {} seconds".format(tend - tstart))
 
-        if self.opti_x0 is not None:
+        tstart = time.time()
+        if self.use_warmstart and self.opti_x0 is not None:
             self.opti.set_initial(self.opti.lam_g, self.opti_lam_g0)
             self.opti.set_initial(self.opti.x, self.opti_x0)
         sol = self.opti.solve()  # actual solve
+        tend = time.time()
+        print("solving the problem took {} seconds".format(tend - tstart))
         self.opti_x0 = sol.value(self.opti.x)
         self.opti_lam_g0 = sol.value(self.opti.lam_g)
@@ -256,7 +266,7 @@ class OpenLoopSolver:
         u_opt_1 = sol.value(self.U[0,:])
         u_opt_2 = sol.value(self.U[1,:])
 
-        return (u_opt_1, u_opt_2)
+        return (u_opt_1, u_opt_2, sol.value(posx), sol.value(posy))
 
         #lam_g0 = sol.value(self.opti.lam_g)
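Note: the hunks above combine two techniques, a slack variable that turns the obstacle keep-out circles into soft constraints, and an Ipopt warm start fed from the previous solution via opti.x and opti.lam_g. The following is a minimal, self-contained CasADi sketch of that pattern; the point-mass dynamics, penalty weight, and obstacle values are illustrative assumptions, not the project's actual solver.

    # Sketch: penalized slack + warm start with CasADi's Opti stack (toy problem only).
    from casadi import Opti

    N = 20
    dt = 0.2
    opti = Opti()
    X = opti.variable(2, N + 1)          # planar positions
    U = opti.variable(2, N)              # velocity commands
    slack = opti.variable(1, 1)          # shared slack for the obstacle constraints
    x_start = opti.parameter(2)          # current state, updated every MPC cycle

    target = (1.0, 1.0)
    obstacles = [((0.5, 0.5), 0.2)]      # (center, radius), made-up values

    cost = (X[0, N] - target[0])**2 + (X[1, N] - target[1])**2
    opti.minimize(cost + 1.0e5 * slack**2)           # heavy penalty keeps slack near zero

    opti.subject_to(X[:, 0] == x_start)
    opti.subject_to(opti.bounded(-0.95, U, 0.95))    # control limits, as in the patch
    for k in range(N):
        opti.subject_to(X[:, k + 1] == X[:, k] + dt * U[:, k])   # toy dynamics
    for (p, r) in obstacles:
        for k in range(1, N):
            # soft keep-out circle; the slack lets Ipopt recover from infeasible starts
            opti.subject_to((X[0, k] - p[0])**2 + (X[1, k] - p[1])**2 + slack >= r**2)

    opti.solver('ipopt')
    x_prev, lam_prev = None, None
    for step in range(3):                            # pretend receding-horizon loop
        opti.set_value(x_start, [0.05 * step, 0.0])
        if x_prev is not None:                       # warm start from the last solution
            opti.set_initial(opti.x, x_prev)
            opti.set_initial(opti.lam_g, lam_prev)
        sol = opti.solve()
        x_prev, lam_prev = sol.value(opti.x), sol.value(opti.lam_g)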
diff --git a/remote_control/position_controller.py b/remote_control/position_controller.py
index 4cef0f1..922ffcf 100644
--- a/remote_control/position_controller.py
+++ b/remote_control/position_controller.py
@@ -18,6 +18,7 @@ from copy import deepcopy
 
 import matplotlib.pyplot as plt
 import matplotlib.animation as anim
+import matplotlib.patches as patch
 
 import time
@@ -37,6 +38,12 @@ class Robot:
 
         self.ip = ip
 
+class Obstacle:
+    def __init__(self, id, radius):
+        self.id = id
+        self.pos = None
+        self.radius = radius
+
 def f_ode(t, x, u):
     # dynamical model of the two-wheeled robot
     # TODO: find exact values for these parameters
@@ -62,19 +69,28 @@ class RemoteController:
     def __init__(self):
-        self.robots = [Robot(3)]
+        self.robots = [Robot(3, '192.168.1.103')]
 
         self.robot_ids = {}
         for r in self.robots:
             self.robot_ids[r.id] = r
 
+        obst = [Obstacle(6, 0.2), Obstacle(5, 0.2), Obstacle(8, 0.2)]
+
+        self.obstacles = {}
+        for r in obst:
+            self.obstacles[r.id] = r
+
         # connect to robot
         self.rc_socket = socket.socket()
+        #self.rc_socket = None
        try:
-            pass
-            self.rc_socket.connect(('192.168.1.103', 1234))  # connect to robot
+            for r in self.robots:
+                self.rc_socket.connect((r.ip, 1234))  # connect to robot
        except socket.error:
            print("could not connect to socket")
+            self.rc_socket = None
+
         self.t = time.time()
@@ -89,6 +105,10 @@ class RemoteController:
         self.tms = None
         self.xms = None
 
+        # variable for mpc open loop
+        self.ol_x = None
+        self.ol_y = None
+
         self.mutex = threading.Lock()
 
         marker_sub = rospy.Subscriber("/marker_id_pos_angle", id_pos_angle, self.measurement_callback)
@@ -113,10 +133,19 @@ class RemoteController:
         self.fig = plt.figure()
         self.ax = self.fig.add_subplot(1,1,1)
         self.xdata, self.ydata = [], []
-        self.line, = self.ax.plot([],[])
+        self.line, = self.ax.plot([],[], color='grey', linestyle=':')
         self.line_sim, = self.ax.plot([], [])
+        self.line_ol, = self.ax.plot([],[], color='green', linestyle='--')
         self.dirm, = self.ax.plot([], [])
         self.dirs, = self.ax.plot([], [])
+
+        self.circles = []
+        for o in self.obstacles:
+            self.circles.append(patch.Circle((0.0, 0.0), radius=0.1, fill=False, color='red', linestyle='--'))
+
+        for s in self.circles:
+            self.ax.add_artist(s)
+
         plt.xlabel('x-position')
         plt.ylabel('y-position')
         plt.grid()
@@ -136,7 +165,7 @@ class RemoteController:
         self.ax.set_ylim(-2, 2)
         self.ax.set_aspect('equal', adjustable='box')
 
-        return self.line, self.line_sim, self.dirm, self.dirs,
+        return self.line, self.line_sim, self.dirm, self.dirs, self.line_ol, self.circles[0], self.circles[1],self.circles[2],
 
     def ani_update(self, frame):
         #print("plotting")
@@ -155,8 +184,8 @@ class RemoteController:
                 a = xm_local[-1, 0]
                 b = xm_local[-1, 1]
-                a2 = a + np.cos(xm_local[-1, 2]) * 1.0
-                b2 = b + np.sin(xm_local[-1, 2]) * 1.0
+                a2 = a + np.cos(xm_local[-1, 2]) * 0.2
+                b2 = b + np.sin(xm_local[-1, 2]) * 0.2
 
                 self.dirm.set_data(np.array([a, a2]), np.array([b, b2]))
@@ -171,14 +200,34 @@ class RemoteController:
                 a = xs_local[-1, 0]
                 b = xs_local[-1, 1]
-                a2 = a + np.cos(xs_local[-1, 2]) * 1.0
-                b2 = b + np.sin(xs_local[-1, 2]) * 1.0
+                a2 = a + np.cos(xs_local[-1, 2]) * 0.2
+                b2 = b + np.sin(xs_local[-1, 2]) * 0.2
 
                 self.dirs.set_data(np.array([a, a2]), np.array([b, b2]))
+
+            ol_x_local = deepcopy(self.ol_x)
+            ol_y_local = deepcopy(self.ol_y)
+
+            if ol_x_local is not None:
+                self.line_ol.set_data(ol_x_local, ol_y_local)
+            else:
+                self.line_ol.set_data([],[])
+
+
+            i = 0
+            obst_keys = self.obstacles.keys()
+            for s in self.circles:
+                o = self.obstacles[obst_keys[i]]
+                i = i + 1
+
+                if o.pos is not None:
+                    s.center = o.pos
+                    s.radius = o.radius
+
         finally:
             self.mutex.release()
 
-        return self.line, self.line_sim, self.dirm, self.dirs,
+        return self.line, self.line_sim, self.dirm, self.dirs, self.line_ol, self.circles[0], self.circles[1],self.circles[2],
 
     def measurement_callback(self, data):
         #print("data = {}".format(data))
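Note: ani_init/ani_update above drive the new obstacle markers by mutating matplotlib patch artists inside the animation callback. A stripped-down sketch of that pattern, with made-up obstacle motion and none of the ROS or controller state, might look like this:

    # Sketch only: obstacle circles animated via matplotlib.patches, as in ani_update.
    import numpy as np
    import matplotlib.pyplot as plt
    import matplotlib.animation as anim
    import matplotlib.patches as patch

    fig, ax = plt.subplots()
    ax.set_xlim(-2, 2)
    ax.set_ylim(-2, 2)
    ax.set_aspect('equal', adjustable='box')

    circles = [patch.Circle((0.0, 0.0), radius=0.1, fill=False, color='red', linestyle='--')
               for _ in range(3)]
    for c in circles:
        ax.add_artist(c)

    def update(frame):
        # in the controller these centers come from the marker measurement callback
        for i, c in enumerate(circles):
            c.center = (np.cos(0.1 * frame + i), np.sin(0.1 * frame + i))
            c.radius = 0.2
        return circles

    ani = anim.FuncAnimation(fig, update, frames=200, interval=50, blit=True)
    plt.show()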
@@ -211,6 +260,10 @@ class RemoteController:
         finally:
             self.mutex.release()
 
+        if data.id in self.obstacles.keys():
+            obst = (data.x, data.y)
+            self.obstacles[data.id].pos = obst
+
     def controller(self):
         tgrid = None
         us1 = None
@@ -348,54 +401,6 @@ class RemoteController:
                 #print("speed = {}".format(self.speed))
 
-            elif pid:
-                # pid controller
-
-                events = pygame.event.get()
-                for event in events:
-                    if event.type == pygame.KEYDOWN:
-                        if event.key == pygame.K_LEFT:
-                            self.ii = self.ii / np.sqrt(np.sqrt(np.sqrt(10.0)))
-                            print("ii = {}".format(self.pp))
-                        elif event.key == pygame.K_RIGHT:
-                            self.ii = self.ii * np.sqrt(np.sqrt(np.sqrt(10.0)))
-                            print("ii = {}".format(self.pp))
-                        elif event.key == pygame.K_UP:
-                            self.controlling = True
-                        elif event.key == pygame.K_DOWN:
-                            self.controlling = False
-                            self.rc_socket.send('({},{})\n'.format(0, 0))
-
-                dt = 0.05
-
-                if self.controlling:
-                    # test: turn robot such that angle is zero
-                    for r in self.robots:
-                        if r.euler is not None:
-                            self.k = self.k + 1
-
-                            alpha = r.euler
-                            self.alphas.append(alpha)
-
-                            # compute error
-                            e = alpha - 0
-
-                            # compute integral of error (approximately)
-                            self.inc += e * dt
-
-                            # PID
-                            p = self.pp * e
-                            i = self.ii * self.inc
-                            d = 0.0
-
-                            # compute controls for robot from PID
-                            u1 = p + i + d
-                            u2 = - p - i - d
-                            print("alpha = {}, u = ({}, {})".format(alpha, u1, u2))
-                            self.rc_socket.send('({},{})\n'.format(u1, u2))
-
-                            time.sleep(dt)
-
             elif open_loop_solve:
                 # open loop controller
@@ -407,7 +412,8 @@ class RemoteController:
                             self.t = time.time()
                         elif event.key == pygame.K_DOWN:
                             self.controlling = False
-                            self.rc_socket.send('(0.1, 0.0,0.0)\n')
+                            if self.rc_socket:
+                                self.rc_socket.send('(0.0,0.0)\n')
                         elif event.key == pygame.K_0:
                             self.target = (0.0, 0.0, 0.0)
                         elif event.key == pygame.K_1:
@@ -419,7 +425,6 @@ class RemoteController:
                         elif event.key == pygame.K_4:
                             self.target = (-0.5,0.5, 0.0)
                 if self.controlling:
-                    tmpc_start = time.time()
                     # get measurement
                     self.mutex.acquire()
                     try:
@@ -428,24 +433,33 @@ class RemoteController:
                     finally:
                         self.mutex.release()
 
-                    print("current measurement (t, x) = ({}, {})".format(last_time, last_measurement))
-                    print("current control (u1, u2) = ({}, {})".format(u1, u2))
+                    #print("current measurement (t, x) = ({}, {})".format(last_time, last_measurement))
+                    #print("current control (u1, u2) = ({}, {})".format(u1, u2))
 
                     # prediction of state at time the mpc will terminate
                     r.set_f_params(np.array([u1 * omega_max, u2 * omega_max]))
                     r.set_initial_value(last_measurement, last_time)
 
                     dt = self.ols.T/self.ols.N
-                    print("integrating for {} seconds".format((dt)))
+                    #print("integrating for {} seconds".format((dt)))
                     x_pred = r.integrate(r.t + (dt))
-                    print("predicted initial state x_pred = ({})".format(x_pred))
+                    #print("predicted initial state x_pred = ({})".format(x_pred))
 
-                    res = self.ols.solve(x_pred, self.target)
+                    tmpc_start = time.time()
+
+                    res = self.ols.solve(x_pred, self.target, self.obstacles)
                     #tgrid = res[0]
                     us1 = res[0]
                     us2 = res[1]
 
+                    self.mutex.acquire()
+                    try:
+                        self.ol_x = res[2]
+                        self.ol_y = res[3]
+                    finally:
+                        self.mutex.release()
+
                     # tt = 0
                     # x = last_measurement
                     # t_ol = np.array([tt])
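Note: the controller hunk above hides solver latency by integrating the last measurement forward over one sampling interval (ols.T/ols.N) before handing it to solve(). A condensed sketch of that prediction step follows; the differential-drive f_ode, wheel parameters, and omega_max value are placeholders (the real model lives in position_controller.py and its parameters are still marked TODO there).

    # Sketch: predict the state one MPC interval ahead with scipy.integrate.ode.
    import numpy as np
    from scipy.integrate import ode

    def f_ode(t, x, u):
        # assumed differential-drive kinematics, illustrative parameters only
        r_wheel, d_axle = 0.03, 0.10
        omega_r, omega_l = u
        v = r_wheel / 2.0 * (omega_r + omega_l)      # forward speed
        w = r_wheel / d_axle * (omega_r - omega_l)   # yaw rate
        return [v * np.cos(x[2]), v * np.sin(x[2]), w]

    omega_max = 13.0                       # illustrative wheel-speed scaling
    u1, u2 = 0.5, 0.4                      # last commanded (normalized) controls
    last_time = 0.0
    last_measurement = np.array([0.0, 0.0, 0.0])     # (x, y, angle)

    r = ode(f_ode).set_integrator('dopri5')
    r.set_f_params(np.array([u1 * omega_max, u2 * omega_max]))
    r.set_initial_value(last_measurement, last_time)

    dt = 4.0 / 20                          # ols.T / ols.N with the new defaults
    x_pred = r.integrate(r.t + dt)         # initial state handed to ols.solve(...)
    print(x_pred)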
= ({}, {}, {})".format(last_time, second_time, second_time - last_time)) + #print("mismatch between predicted state and measured state: {}\n\n".format(second_measurement - last_measurement)) for i in range(0, 1): u1 = us1[i] u2 = us2[i] #self.rc_socket.send('({},{},{})\n'.format(dt,u1, u2)) - self.rc_socket.send('({},{})\n'.format(u1, u2)) + if self.rc_socket: + self.rc_socket.send('({},{})\n'.format(u1, u2)) self.t = time.time() - time.sleep(0.2) + #time.sleep(0.2) # @@ -506,7 +521,7 @@ def main(args): screenheight = 480 screenwidth = 640 - screen = pygame.display.set_mode([screenwidth, screenheight]) + pygame.display.set_mode([screenwidth, screenheight]) threading.Thread(target=rc.controller).start()