added more waypoints. final version for TdM 2019

This commit is contained in:
Simon Pirkelmann 2019-07-12 17:45:31 +02:00
parent e219f7de38
commit a630150a24

View File

@ -124,14 +124,14 @@ def f_ode(t, x, u):
 class RemoteController:
     def __init__(self):
-        self.robots = [Robot(15, '192.168.1.103')]
+        self.robots = [Robot(11, '192.168.1.103')]
         #self.robots = [Robot(14, '192.168.1.102')]

         self.robot_ids = {}
         for r in self.robots:
             self.robot_ids[r.id] = r

-        obst = [Obstacle(12, 0.275), Obstacle(10, 0.275), Obstacle(13, 0.275), Obstacle(14, 0.275)]
+        obst = [Obstacle(12, 0.275), Obstacle(10, 0.275), Obstacle(14, 0.275), Obstacle(15, 0.275)]

         self.obstacles = {}
         for r in obst:
@ -369,10 +369,18 @@ class RemoteController:
             markers_out = self.track.outer.values()

             # find targets:
-            lamb = 0.3
+            lamb = 0.2
+            j = 0
             for i in range(0,4):
                 p = np.array(markers_in[i]) + lamb * (np.array(markers_out[i]) - np.array(markers_in[i]))
-                targets[i] = (p[0],p[1], 0.0)
+                targets[j] = (p[0],p[1], 0.0)
+                j += 1
+                if i < 3:
+                    mean_in = (np.array(markers_in[i]) + np.array(markers_in[i+1])) * 0.5
+                    mean_out = (np.array(markers_out[i]) + np.array(markers_out[i+1])) * 0.5
+                    mean = mean_in + (1.0 - lamb) * (mean_out - mean_in)
+                    targets[j] = (mean[0], mean[1], 0.0)
+                    j += 1

             auto_control = False
             current_target = 0
auto_control = False auto_control = False
current_target = 0 current_target = 0
@ -405,6 +413,12 @@ class RemoteController:
                         #self.target = (-0.5,0.5, 0.0)
                         self.target = targets[3]
                     elif event.key == pygame.K_5:
+                        self.target = targets[4]
+                    elif event.key == pygame.K_6:
+                        self.target = targets[5]
+                    elif event.key == pygame.K_7:
+                        self.target = targets[6]
+                    elif event.key == pygame.K_SPACE:
                         auto_control = not auto_control
                         if auto_control:
                             self.target = targets[current_target]
@ -416,7 +430,7 @@ class RemoteController:
                 if auto_control:
                     if np.linalg.norm(x_pred[0:2]-np.array(self.target[0:2])) < 0.3:
                         print("close to target!")
-                        current_target = (current_target + 1) % 4
+                        current_target = (current_target + 1) % 7
                         self.target = targets[current_target]
                         print("new target = {}".format(current_target))