from casadi import *
import time

# look at: https://github.com/casadi/casadi/blob/master/docs/examples/python/vdp_indirect_multiple_shooting.py


class OpenLoopSolver:

    def __init__(self, N=20, T=4.0):
        self.T = T
        self.N = N

        # primal/dual values cached from the previous solve, used for warmstarting
        self.opti_x0 = None
        self.opti_lam_g0 = None

        self.use_warmstart = True

    def setup(self):
        x = SX.sym('x')
        y = SX.sym('y')
        theta = SX.sym('theta')
        state = vertcat(x, y, theta)
        r = 0.03
        R = 0.05
        d = 0.02
        # omega_max = 13.32
        omega_max = 10.0

        omegar = SX.sym('omegar')
        omegal = SX.sym('omegal')
        control = vertcat(omegar, omegal)

        # model equation
        f1 = (r / 2 * cos(theta) - r * d / (2 * R) * sin(theta)) * omegar * omega_max \
             + (r / 2 * cos(theta) + r * d / (2 * R) * sin(theta)) * omegal * omega_max
        f2 = (r / 2 * sin(theta) + r * d / (2 * R) * cos(theta)) * omegar * omega_max \
             + (r / 2 * sin(theta) - r * d / (2 * R) * cos(theta)) * omegal * omega_max
        f3 = -(r / (2 * R) * omegar - r / (2 * R) * omegal) * omega_max
        xdot = vertcat(f1, f2, f3)
        f = Function('f', [x, y, theta, omegar, omegal], [f1, f2, f3])
        print("f = {}".format(f))
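        # f1, f2, f3 are differential-drive kinematics driven by the two wheel
        # speeds omegar and omegal (scaled by omega_max): the r/2 terms give the
        # translational velocity, the r/(2*R) terms the turn rate, and the
        # d-dependent terms presumably account for a reference point that is
        # offset from the wheel axle.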

        # cost functional
        target = (-0.0, 0.0)
        L = (x - target[0]) ** 2 + (y - target[1]) ** 2 + 1e-2 * theta ** 2 + 1e-2 * (omegar ** 2 + omegal ** 2)
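        # The running cost penalizes the squared distance to the target position,
        # with small (1e-2) regularization terms on the heading angle and on the
        # wheel speed commands.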

        # Fixed step Runge-Kutta 4 integrator
        M = 4  # RK4 steps per interval
        DT = self.T / self.N / M
        print("DT = {}".format(DT))
        self.f = Function('f', [state, control], [xdot, L])
        X0 = MX.sym('X0', 3)
        U = MX.sym('U', 2)
        X = X0
        Q = 0
        runge_kutta = True
        if runge_kutta:
            for j in range(M):
                k1, k1_q = self.f(X, U)
                k2, k2_q = self.f(X + DT / 2 * k1, U)
                k3, k3_q = self.f(X + DT / 2 * k2, U)
                k4, k4_q = self.f(X + DT * k3, U)
                X = X + DT / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
                Q = Q + DT / 6 * (k1_q + 2 * k2_q + 2 * k3_q + k4_q)
        else:
            # single explicit Euler step per control interval
            DT = self.T / self.N
            k1, k1_q = self.f(X, U)
            X = X + DT * k1
            Q = Q + DT * k1_q
        F = Function('F', [X0, U], [X, Q], ['x0', 'p'], ['xf', 'qf'])
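        # F maps (x0, u) over one control interval of length T/N using the M RK4
        # substeps above: 'xf' is the state at the end of the interval and 'qf'
        # the running cost accumulated along it.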

        # F_euler = Function('F_euler', [X0, U], [Xeuler, Qeuler], ['x0', 'p'], ['xf', 'qf'])

        # quick sanity check of the one-interval integrator
        Fk = F(x0=[0.2, 0.3, 0.0], p=[-1.1, 1.1])
        print(Fk['xf'])
        print(Fk['qf'])

        # Start with an empty NLP
        w = []
        w0 = []
        lbw = []
        ubw = []
        J = 0
        g = []
        lbg = []
        ubg = []

        # Formulate the NLP (single-shooting formulation, kept for reference)
        # Xk = MX(x0)
        # for k in range(self.N):
        #     # New NLP variable for the control
        #     U1k = MX.sym('U1_' + str(k), 2)
        #     # U2k = MX.sym('U2_' + str(k))
        #     w += [U1k]
        #     lbw += [-0.5, -0.5]
        #     ubw += [0.5, 0.5]
        #     w0 += [0, 0]
        #
        #     # Integrate till the end of the interval
        #     Fk = F(x0=Xk, p=U1k)
        #     Xk = Fk['xf']
        #     J = J + Fk['qf']
        #
        #     # Add inequality constraint
        #     # g += [Xk[1]]
        #     # lbg += [-.0]
        #     # ubg += [inf]
        #
        # # Create an NLP solver
        # prob = {'f': J, 'x': vertcat(*w), 'g': vertcat(*g)}
        # self.solver = nlpsol('solver', 'ipopt', prob)

        # Solve the NLP (disabled; relies on the single-shooting problem commented out above)
        if False:
            sol = self.solver(x0=w0, lbx=lbw, ubx=ubw, lbg=lbg, ubg=ubg)
            w_opt = sol['x']

            # Plot the solution
            u_opt = w_opt
            x_opt = [self.x0]
            for k in range(self.N):
                Fk = F(x0=x_opt[-1], p=u_opt[2 * k:2 * k + 2])
                x_opt += [Fk['xf'].full()]
            x1_opt = [r[0] for r in x_opt]
            x2_opt = [r[1] for r in x_opt]
            x3_opt = [r[2] for r in x_opt]

            tgrid = [self.T / self.N * k for k in range(self.N + 1)]
            # import matplotlib.pyplot as plt
            # plt.figure(2)
            # plt.clf()
            # plt.plot(tgrid, x1_opt, '--')
            # plt.plot(tgrid, x2_opt, '-')
            # plt.plot(tgrid, x3_opt, '*')
            # plt.step(tgrid, vertcat(DM.nan(1), u_opt), '-.')
            # plt.xlabel('t')
            # plt.legend(['x1', 'x2', 'x3', 'u'])
            # plt.grid()
            # plt.show()
            # return

    def solve(self, x0, target, obstacles, track):
        tstart = time.time()
        # alternative solution using multiple shooting (much faster than single shooting)
        self.opti = Opti()  # optimization problem

        # ---- decision variables ---------
        self.X = self.opti.variable(3, self.N + 1)  # state trajectory
        self.Q = self.opti.variable(1, self.N + 1)  # accumulated running cost

        self.U = self.opti.variable(2, self.N)  # control trajectory (wheel speeds)

        self.slack = self.opti.variable(1, 1)   # slack for soft obstacle constraints
        # T = self.opti.variable()  # final time

        # ---- objective ---------
        # self.opti.minimize(T)  # race in minimal time

        # ---- dynamic constraints --------
        # f = lambda x, u: vertcat(f1, f2, f3)  # dx/dt = f(x,u)

        # ---- initial values for solver ---
        # self.opti.set_initial(speed, 1)
        # self.opti.set_initial(T, 1)

        # model symbols and parameters (note: omega_max here differs from the value used in setup())
        x = SX.sym('x')
        y = SX.sym('y')
        theta = SX.sym('theta')
        state = vertcat(x, y, theta)
        r = 0.03
        R = 0.05
        d = 0.02
        omega_max = 13.32

        omegar = SX.sym('omegar')
        omegal = SX.sym('omegal')
        control = vertcat(omegar, omegal)

        # model equation
        f1 = (r / 2 * cos(theta) - r * d / (2 * R) * sin(theta)) * omegar * omega_max \
             + (r / 2 * cos(theta) + r * d / (2 * R) * sin(theta)) * omegal * omega_max
        f2 = (r / 2 * sin(theta) + r * d / (2 * R) * cos(theta)) * omegar * omega_max \
             + (r / 2 * sin(theta) - r * d / (2 * R) * cos(theta)) * omegal * omega_max
        f3 = -(r / (2 * R) * omegar - r / (2 * R) * omegal) * omega_max
        xdot = vertcat(f1, f2, f3)

        # running cost: distance to the target pose plus small penalties on heading error and control effort
        L = (x - target[0]) ** 2 + (y - target[1]) ** 2 + 1e-2 * (theta - target[2]) ** 2 + 1e-2 * (omegar ** 2 + omegal ** 2)
        self.f = Function('f', [state, control], [xdot, L])

        # ---- solve NLP ------

        # set numerical backend

        # delete constraints
        self.opti.subject_to()

        # add new constraints
        dt = self.T / self.N  # length of a control interval
        for k in range(self.N):  # loop over control intervals
            # Runge-Kutta 4 integration
            k1, k1_q = self.f(self.X[:, k], self.U[:, k])
            k2, k2_q = self.f(self.X[:, k] + dt / 2 * k1, self.U[:, k])
            k3, k3_q = self.f(self.X[:, k] + dt / 2 * k2, self.U[:, k])
            k4, k4_q = self.f(self.X[:, k] + dt * k3, self.U[:, k])
            x_next = self.X[:, k] + dt / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
            q_next = self.Q[:, k] + dt / 6 * (k1_q + 2 * k2_q + 2 * k3_q + k4_q)
            self.opti.subject_to(self.X[:, k + 1] == x_next)  # close the gaps
            self.opti.subject_to(self.Q[:, k + 1] == q_next)  # close the gaps

        self.opti.minimize(self.Q[:, self.N] + 1.0e5 * self.slack**2)
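        # Multiple-shooting formulation: the state and accumulated cost at every
        # node are decision variables, the RK4 equalities above "close the gaps"
        # between consecutive nodes, and the objective is the cost at the final
        # node plus a large quadratic penalty on the obstacle slack variable.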

        # ---- path constraints -----------
        # limit = lambda pos: 1 - sin(2 * pi * pos) / 2
        # self.opti.subject_to(speed <= limit(pos))  # track speed limit
        maxcontrol = 0.9
        self.opti.subject_to(self.opti.bounded(-maxcontrol, self.U, maxcontrol))  # control is limited

        # ---- boundary conditions --------
        # self.opti.subject_to(speed[-1] == 0)  # .. with speed 0
        self.opti.subject_to(self.Q[:, 0] == 0.0)  # accumulated cost starts at zero

        self.opti.solver("ipopt", {}, {"print_level": 0})  # configure Ipopt as the NLP solver

        # ---- misc. constraints ----------
        # self.opti.subject_to(X[1, :] >= 0)
        # self.opti.subject_to(X[2, :] <= 4)
        # self.opti.subject_to(X[2, :] >= -2)

        # avoid obstacles (soft constraints via the shared slack variable)
        for o in obstacles:
            p = obstacles[o].pos
            radius = obstacles[o].radius  # renamed from r to avoid shadowing the model parameter above
            if p is not None:
                for k in range(1, self.N):
                    self.opti.subject_to((self.X[0, k] - p[0])**2 + (self.X[1, k] - p[1])**2 + self.slack > radius**2)
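        # The obstacle constraints are soft: a positive slack enlarges the allowed
        # region, while the 1e5 * slack**2 term in the objective makes violations
        # expensive, which helps keep the NLP feasible when the trajectory would
        # otherwise have to pass through an obstacle.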

        # keep inside track
        # TODO
        # track_ids = list(track.inner.keys())
        # a = track.outer[track_ids[0]]
        # b = track.outer[track_ids[1]]
        # c = track.outer[track_ids[2]]
        # d = track.outer[track_ids[3]]
        #
        # for k in range(1, self.N):
        #     self.opti.subject_to(self.opti.subject_to(self.X))

        posx = self.X[0, :]
        posy = self.X[1, :]
        angle = self.X[2, :]

        # pin the first node of the trajectory to the measured initial pose
        self.opti.subject_to(posx[0] == x0[0])
        self.opti.subject_to(posy[0] == x0[1])
        self.opti.subject_to(angle[0] == x0[2])
        tend = time.time()

        print("setting up problem took {} seconds".format(tend - tstart))

        tstart = time.time()
        if self.use_warmstart and self.opti_x0 is not None:
            try:
                # initialize primal and dual variables from the previous solution
                self.opti.set_initial(self.opti.lam_g, self.opti_lam_g0)
                self.opti.set_initial(self.opti.x, self.opti_x0)
            except RuntimeError:
                print("could not set warmstart")
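        # Warmstarting from the previous primal/dual iterate typically reduces
        # Ipopt's iteration count when solve() is called repeatedly, since
        # consecutive problems differ only in x0, target and obstacles.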
        sol = self.opti.solve()  # actual solve
        tend = time.time()
        print("solving the problem took {} seconds".format(tend - tstart))

        tstart = time.time()
        # cache the solution for warmstarting the next call
        self.opti_x0 = sol.value(self.opti.x)
        self.opti_lam_g0 = sol.value(self.opti.lam_g)

        # u_opt_1 = map(lambda x: float(x), [u_opt[i * 2] for i in range(0, 60)])
        # u_opt_2 = map(lambda x: float(x), [u_opt[i * 2 + 1] for i in range(0, 60)])
        u_opt_1 = sol.value(self.U[0, :])
        u_opt_2 = sol.value(self.U[1, :])
        tend = time.time()
        print("postprocessing took {} seconds".format(tend - tstart))

        return (u_opt_1, u_opt_2, sol.value(posx), sol.value(posy))

        # ---- unreachable debugging/plotting code below (kept for reference) ----
        # lam_g0 = sol.value(self.opti.lam_g)
        # self.opti.solve()

        from pylab import plot, step, figure, legend, show, spy
        import matplotlib.pyplot as plt  # needed for plt.figure / plt.gca / plt.show below

        plot(sol.value(posx), label="posx")
        plot(sol.value(posy), label="posy")
        plot(sol.value(angle), label="angle")

        plt.figure(3)
        plot(sol.value(posx), sol.value(posy))
        ax = plt.gca()
        # circle = plt.Circle(p, r)
        # ax.add_artist(circle)
        # plot(limit(sol.value(pos)), 'r--', label="speed limit")
        # step(range(N), sol.value(U), 'k', label="throttle")
        legend(loc="upper left")
        plt.show()
        pass

    # linearization
    # A = zeros(states.shape[0], states.shape[0])
    # for i in range(f.shape[0]):
    #     for j in range(states.shape[0]):
    #         A[i, j] = diff(f[i, 0], states[j])
    # Alin = A.subs([(theta, 0), (omegar, 0), (omegal, 0)])
    # print("A = {}".format(Alin))
    # B = zeros(f.shape[0], controls.shape[0])
    # for i in range(f.shape[0]):
    #     for j in range(controls.shape[0]):
    #         B[i, j] = diff(f[i, 0], controls[j])
    # print("B = {}".format(B))
    # dfdtheta = diff(f, theta)
    # print(dfdtheta.doit())

    # takeaway: linearization is not helpful, because the linearized system is not stabilizable
    # -> alternative: use a nonlinear control method