Compare commits

...

16 Commits

Author SHA1 Message Date
9595a68494 added custom messages for pos and angle 2019-06-11 09:56:25 +02:00
19ec0283bf added notes on add custom messages and some initial notes for the setup (required ros packages) 2019-06-11 09:55:05 +02:00
1bedc7f01d added note on how to resolve problem with buggy motor shield 2019-06-11 09:53:13 +02:00
5715fc6349 added webrepl for third robot 2019-06-11 09:51:36 +02:00
441e55000a added option to set maximum speed 2019-06-10 23:16:56 +02:00
79f4fcc032 added error message if no i2c devices are found 2019-06-10 23:16:14 +02:00
2fb69de9c5 added ip of the 3rd robot 2019-06-10 23:15:36 +02:00
685dd6a4c9 Webrepl with preset ip address for the robot 2019-06-07 14:22:19 +02:00
1d8d2d8043 added ros node for transformation of translation and quaternion rotation to position and angle in 2d plane 2019-06-07 14:20:05 +02:00
5bc1c9ca9f fixed another reconnection issue that only seems to occur with windows 2019-06-06 15:19:14 +02:00
4bff8e1df5 fixed a bug in keyboard remote controller 2019-06-05 08:06:11 +02:00
6ebb9338c0 removed remote control script from micropython folder 2019-06-05 08:05:34 +02:00
69c40a3fe2 fixed issue with reconnection problem after socket is closed 2019-06-05 08:04:26 +02:00
4d7c0e7be7 robot now tries to automatically connect to an existing wifi network. if the network is not found then the robot will become an access point instead 2019-06-05 08:03:31 +02:00
530cef54b3 added simulation of robot 2019-05-24 09:21:54 -05:00
f3de1b173a example for optimal control of robot 2019-05-24 09:20:49 -05:00
17 changed files with 2185 additions and 149 deletions

View File

@ -0,0 +1,7 @@
See: https://github.com/wemos/WEMOS_Motor_Shield_Arduino_Library/issues/1
Possible solution: reprogram the STM32 on the shield:
https://hackaday.io/project/18439-motor-shield-reprogramming
This can be done using only the Wemos board itself:
https://github.com/thomasfredericks/wemos_motor_shield

View File

@ -0,0 +1,56 @@
Creating ros package with catkin and adding custom messages:
############################################################
1. Create a workspace:
----------------------
$ mkdir catkin_ws
$ cd catkin_ws
$ mkdir src
$ catkin_make
$ source devel/setup.bash
2. Create the package:
----------------------
$ cd catkin_ws/src
$ catkin_create_pkg <package_name> std_msgs rospy roscpp
$ cd ..
$ catkin_make # this builds the package
3. Create the custom messages:
------------------------------
$ cd catkin_ws/src/<package_name>
$ mkdir msg
$ cd msg
$ echo "int64 num" > Num.msg
Then edit the package.xml file and add the following:
<build_depend>message_generation</build_depend>
<exec_depend>message_runtime</exec_depend>
Also edit CMakeLists.txt: add the following to the find_package() call:
message_generation
and add the following to catkin_package():
CATKIN_DEPENDS message_runtime
Then, find add_message_files() (uncomment if necessary) and change it to the following:
add_message_files(
FILES
Num.msg
)
Finally, uncomment the generate_messages() function such that it reads:
generate_messages(
DEPENDENCIES
std_msgs
)
To build the new messages do the following:
$ cd catkin_ws
$ catkin_make # builds the new messages
$ catkin_make install
As the last step, source the setup.bash:
$ . install/setup.bash
Now the new messages should be available; you can list them with
$ rosmsg list
Don't forget to source the setup.bash every time you want to use the messages!
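To check that the generated message is actually importable, here is a minimal publisher sketch (replace <package_name> with the name chosen in step 2; the node and topic names below are only examples):
import rospy
from <package_name>.msg import Num

rospy.init_node('num_publisher_example')
pub = rospy.Publisher('num_topic', Num, queue_size=10)
rate = rospy.Rate(1)  # publish once per second
i = 0
while not rospy.is_shutdown():
    msg = Num()
    msg.num = i  # the int64 field defined in Num.msg
    pub.publish(msg)
    i += 1
    rate.sleep()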

docu/notes_ros_setup.txt Normal file
View File

@ -0,0 +1,8 @@
1. Install ROS (Robot Operating System) (see: http://wiki.ros.org/kinetic/Installation/Ubuntu)
2. Install the following dependencies:
- cv_camera (see: http://wiki.ros.org/cv_camera):
$ sudo apt-get install ros-kinetic-cv-camera
- Aruco Marker detection using fiducials package (see: http://wiki.ros.org/fiducials)
$ sudo apt-get install ros-kinetic-fiducials
-
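A quick way to verify the camera setup (a sketch; it assumes roscore and the cv_camera node from step 2 are running and that the default topic /cv_camera/image_raw is used):
import rospy
from sensor_msgs.msg import Image

def on_image(msg):
    # print the resolution of each incoming frame
    rospy.loginfo("got frame: {}x{}".format(msg.width, msg.height))

rospy.init_node('camera_check', anonymous=True)
rospy.Subscriber('/cv_camera/image_raw', Image, on_image)
rospy.spin()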

View File

@ -3,11 +3,42 @@ import time
# connect to local wifi network
sta_if = network.WLAN(network.STA_IF)
ap_if = network.WLAN(network.AP_IF)
sta_if.active(True)
networks = sta_if.scan()
print("networks found: {}".format(networks))
ap_found = False
# TODO: edit these lines
network_name = 'M5Wifi'
password = '<PW>'
#desired_ip = '192.168.1.101'
#desired_ip = '192.168.1.102'
desired_ip = '192.168.1.103'
subnet = '255.255.255.0'
gateway = '192.168.1.1'
dns = '192.168.1.1'
for n in networks:
if network_name == n[0].decode():
print("existing network found: {}".format(n[0]))
print("connecting to the network ...")
# set static ip
sta_if.ifconfig((desired_ip, subnet, gateway, dns))
sta_if.connect(network_name, password)
my_ip = sta_if.ifconfig()
print("my_ip[0] = {}".format(my_ip[0]))
if my_ip[0] == desired_ip:
ap_found = True
ap_if.active(False)
print("disabling access point interface")
if not ap_found:
print("could not connect to network, becoming an access point instead")
sta_if.active(False)
-#sta_if.active(True)
-# TODO: edit this line
-#sta_if.connect("<SSID>", "<PASSWORD>")
-#sta_if.ifconfig()
+ap_if.active(True)
+my_ip = ap_if.ifconfig()
+print("disabling station interface")
+print("my_ip = {}".format(my_ip))
# start terminal over wifi
time.sleep(5) # wait for wifi to connect

View File

@ -1,4 +1,5 @@
import machine
+import sys
from machine import I2C, Pin
import d1motor
@ -9,35 +10,44 @@ import esp
class Robot:
def __init__(self):
-print("setting up I2C")
+print("setting up I2C ...")
d1 = Pin(5)
d2 = Pin(4)
i2c = I2C(scl=d1, sda=d2)
-addr = i2c.scan()[0]
-print("i2c scan = {}".format(addr))
-print("setting up motors")
-self.m1 = d1motor.Motor(0, i2c, address=addr)
-self.m2 = d1motor.Motor(1, i2c, address=addr)
-print("motor setup complete")
+i2c_scan = i2c.scan()
+if len(i2c_scan) > 0:
+i2c_addr = i2c_scan[0]
+print("i2c scan = {}".format(i2c_addr))
+print("setting up motors ...")
+self.m1 = d1motor.Motor(0, i2c, address=i2c_addr)
+self.m2 = d1motor.Motor(1, i2c, address=i2c_addr)
+self.m1.speed(0)
+self.m2.speed(0)
+print("motor setup complete!")
+else:
+print("error: no i2c interfaces found!")
+sys.exit(1)
+ip = my_ip[0]
# setup socket for remote control
-self.addr = usocket.getaddrinfo('192.168.4.1', 1234)[0][-1]
+self.addr = usocket.getaddrinfo(ip, 1234)[0][-1]
def remote_control(self):
while True:
-# dirty hack: free memory such that reuse of socket is possible
-print(esp.freemem())
-if esp.freemem() < 9 * 1024:
-s = time.ticks_ms()
-while esp.freemem() < 9 * 1024:
-pass
-print("waited {} ms".format((time.ticks_ms() - s)))
-print("setting up socket communication")
+print("setting up socket communication ...")
+socket_setup_complete = False
+while not socket_setup_complete:
+try:
+print("trying to create socket on address {}".format(self.addr))
socket = usocket.socket()
socket.bind(self.addr)
-print("socket setup complete")
+print("socket setup complete!")
+socket_setup_complete = True
+except Exception as e:
+print("could not create socket. error msg: {}\nwaiting 1 sec and retrying...".format(e))
+time.sleep(1.0)
-print("waiting for connections on {}".format(self.addr))
+print("waiting for connections on {} ...".format(self.addr))
socket.listen(1)
res = socket.accept() # this blocks until someone connects to the socket
comm_socket = res[0]
@ -47,27 +57,33 @@ class Robot:
# expected data: '(u1, u2)'\n"
# where ui = control for motor i
# ui \in [-1.0, 1.0]
+try:
data = comm_socket.readline()
data_str = data.decode()
-print("Data received: {}".format(data_str))
-try:
-print("processing data = {}".format(data_str))
+#print("Data received: {}".format(data_str))
+#print("processing data = {}".format(data_str))
l = data_str.strip('()\n').split(',')
-print("l = {}".format(l))
+#print("l = {}".format(l))
-u1 = int(float(l[0])*10000)
-print("u1 = {}".format(u1))
-u2 = int(float(l[1])*10000)
-print("u2 = {}".format(u2))
+u1 = int(float(l[0])*100)
+#print("u1 = {}".format(u1))
+u2 = int(float(l[1])*100)
+#print("u2 = {}".format(u2))
except ValueError:
print("ValueError: Data has wrong format.")
print("Data received: {}".format(data_str))
-print("Shutting down")
+print("Shutting down ...")
u1 = u2 = 0
listening = False
except IndexError:
print("IndexError: Data has wrong format.")
print("Data received: {}".format(data_str))
-print("Shutting down")
+print("Shutting down ...")
u1 = u2 = 0
listening = False
+except Exception as e:
+print("Some other error occured")
+print("Exception: {}".format(e))
+print("Shutting down ...")
+u1 = u2 = 0
+listening = False
finally:
@ -77,7 +93,7 @@ class Robot:
socket.close()
del comm_socket
del socket
-print("disconnected")
+print("disconnected!")
wall_e = Robot()
wall_e.remote_control()

View File

@ -1,39 +0,0 @@
import socket
import pygame
pygame.init()
pygame.display.set_mode((640, 480))
rc_socket = socket.socket()
try:
rc_socket.connect(('192.168.4.1', 1234)) # connect to robot
except socket.error:
print("could not connect to socket")
while True:
events = pygame.event.get()
for event in events:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
u1 = 1.0
u2 = -1.0
print("turn left: ({},{})".format(u1, u2))
elif event.key == pygame.K_RIGHT:
u1 = -1.0
u2 = 1.0
print("turn right: ({},{})".format(u1, u2))
elif event.key == pygame.K_UP:
u1 = -1.0
u2 = -1.0
print("forward: ({},{})".format(u1, u2))
elif event.key == pygame.K_DOWN:
u1 = 1.0
u2 = 1.0
print("forward: ({},{})".format(u1, u2))
rc_socket.send('({},{})\n'.format(u1, u2))
elif event.type == pygame.KEYUP:
u1 = 0
u2 = 0
print("key released, resetting: ({},{})".format(u1, u2))
rc_socket.send('({},{})\n'.format(u1, u2))

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,224 @@
from casadi import *
# look at: https://github.com/casadi/casadi/blob/master/docs/examples/python/vdp_indirect_multiple_shooting.py
T = 3.0
N = 30
x = SX.sym('x')
y = SX.sym('y')
theta = SX.sym('theta')
state = vertcat(x, y, theta)
r = 0.03
R = 0.05
d = 0.02
#r = SX.sym('r')
#R = SX.sym('R')
#d = SX.sym('d')
omegar = SX.sym('omegar')
omegal = SX.sym('omegal')
control = vertcat(omegar, omegal)
f1 = (r/2 * cos(theta) - r*d/(2*R) * sin(theta)) * omegar + (r/2 * cos(theta) + r*d/(2*R) * sin(theta)) * omegal
f2 = (r/2 * sin(theta) + r*d/(2*R) * cos(theta)) * omegar + (r/2 * sin(theta) - r*d/(2*R) * cos(theta)) * omegal
f3 = r/(2*R) * omegar - r/(2*R) * omegal
xdot = vertcat(f1, f2, f3)
f = Function('f', [x,y,theta, omegar, omegal], [f1, f2, f3])
print("f = {}".format(f))
L = x**2 + y**2 + 1e-2 * theta**2 + 1e-4 * (omegar**2 + omegal**2)
# Fixed step Runge-Kutta 4 integrator
M = 4 # RK4 steps per interval
DT = T/N/M
print("DT = {}".format(DT))
f = Function('f', [state, control], [xdot, L])
X0 = MX.sym('X0', 3)
U = MX.sym('U', 2)
X = X0
Q = 0
runge_kutta = True
if runge_kutta:
for j in range(M):
k1, k1_q = f(X, U)
k2, k2_q = f(X + DT/2 * k1, U)
k3, k3_q = f(X + DT/2 * k2, U)
k4, k4_q = f(X + DT * k3, U)
X=X+DT/6*(k1 +2*k2 +2*k3 +k4)
Q = Q + DT/6*(k1_q + 2*k2_q + 2*k3_q + k4_q)
else:
DT = T/N
k1, k1_q = f(X, U)
X = X + DT * k1
Q = Q + DT * k1_q
F = Function('F', [X0, U], [X, Q],['x0','p'],['xf','qf'])
#F_euler = Function('F_euler', [X0, U], [Xeuler, Qeuler], ['x0', 'p'], ['xf', 'qf'])
Fk = F(x0=[0.2,0.3, 0.0],p=[-1.1, 1.1])
print(Fk['xf'])
print(Fk['qf'])
# Start with an empty NLP
w=[]
w0 = []
lbw = []
ubw = []
J = 0
g=[]
lbg = []
ubg = []
# Formulate the NLP
Xk = MX([1.1, 1.1, 0.0])
for k in range(N):
# New NLP variable for the control
U1k = MX.sym('U1_' + str(k), 2)
#U2k = MX.sym('U2_' + str(k))
w += [U1k]
lbw += [-10, -10]
ubw += [10, 10]
w0 += [0, 0]
# Integrate till the end of the interval
Fk = F(x0=Xk, p=U1k)
Xk = Fk['xf']
J=J+Fk['qf']
# Add inequality constraint
#g += [Xk[1]]
#lbg += [-.0]
#ubg += [inf]
# Create an NLP solver
prob = {'f': J, 'x': vertcat(*w), 'g': vertcat(*g)}
solver = nlpsol('solver', 'ipopt', prob);
# Solve the NLP
sol = solver(x0=w0, lbx=lbw, ubx=ubw, lbg=lbg, ubg=ubg)
w_opt = sol['x']
# Plot the solution
u_opt = w_opt
x_opt = [[1.1, 1.1, -0.0]]
for k in range(N):
Fk = F(x0=x_opt[-1], p=u_opt[2*k:2*k+2])
x_opt += [Fk['xf'].full()]
x1_opt = [r[0] for r in x_opt]
x2_opt = [r[1] for r in x_opt]
x3_opt = [r[2] for r in x_opt]
tgrid = [T/N*k for k in range(N+1)]
import matplotlib.pyplot as plt
plt.figure(1)
plt.clf()
plt.plot(tgrid, x1_opt, '--')
plt.plot(tgrid, x2_opt, '-')
plt.plot(tgrid, x3_opt, '*')
#plt.step(tgrid, vertcat(DM.nan(1), u_opt), '-.')
plt.xlabel('t')
plt.legend(['x1','x2','x3','u'])
plt.grid()
#plt.show()
# alternative solution using multiple shooting (way faster!)
opti = Opti() # Optimization problem
# ---- decision variables ---------
X = opti.variable(3,N+1) # state trajectory
Q = opti.variable(1,N+1) # accumulated cost along the trajectory
posx = X[0,:]
posy = X[1,:]
angle = X[2,:]
U = opti.variable(2,N) # control trajectory (wheel angular velocities)
#T = opti.variable() # final time
# ---- objective ---------
#opti.minimize(T) # race in minimal time
# ---- dynamic constraints --------
#f = lambda x,u: vertcat(f1, f2, f3) # dx/dt = f(x,u)
dt = T/N # length of a control interval
for k in range(N): # loop over control intervals
# Runge-Kutta 4 integration
k1, k1_q = f(X[:,k], U[:,k])
k2, k2_q = f(X[:,k]+dt/2*k1, U[:,k])
k3, k3_q = f(X[:,k]+dt/2*k2, U[:,k])
k4, k4_q = f(X[:,k]+dt*k3, U[:,k])
x_next = X[:,k] + dt/6*(k1+2*k2+2*k3+k4)
q_next = Q[:,k] + dt/6*(k1_q + 2 * k2_q + 2 * k3_q + k4_q)
opti.subject_to(X[:,k+1]==x_next) # close the gaps
opti.subject_to(Q[:,k+1]==q_next) # close the gaps
opti.minimize(Q[:,N])
# ---- path constraints -----------
#limit = lambda pos: 1-sin(2*pi*pos)/2
#opti.subject_to(speed<=limit(pos)) # track speed limit
opti.subject_to(opti.bounded(-10,U,10)) # control is limited
# ---- boundary conditions --------
opti.subject_to(posx[0]==1.10) # initial x position
opti.subject_to(posy[0]==1.10) # initial y position
opti.subject_to(angle[0]==0.0) # initial heading angle
#opti.subject_to(speed[-1]==0) # .. with speed 0
opti.subject_to(Q[:,0]==0.0)
# ---- misc. constraints ----------
#opti.subject_to(X[1,:]>=0) # Time must be positive
#opti.subject_to(X[2,:]<=4) # Time must be positive
#opti.subject_to(X[2,:]>=-2) # Time must be positive
r = 0.25
p = (0.5, 0.5)
for k in range(N):
opti.subject_to((X[0,k]-p[0])**2 + (X[1,k]-p[1])**2 > r**2)
pass
# ---- initial values for solver ---
#opti.set_initial(speed, 1)
#opti.set_initial(T, 1)
# ---- solve NLP ------
opti.solver("ipopt") # set numerical backend
sol = opti.solve() # actual solve
#x0 = sol.value(opti.x)
#lam_g0 = sol.value(opti.lam_g)
#opti.set_initial(opti.lam_g, lam_g0)
#opti.set_initial(opti.x, x0)
#opti.solve()
from pylab import plot, step, figure, legend, show, spy
plot(sol.value(posx),label="posx")
plot(sol.value(posy),label="posy")
plot(sol.value(angle),label="angle")
plt.figure()
plot(sol.value(posx), sol.value(posy))
ax = plt.gca()
circle = plt.Circle(p, r)
ax.add_artist(circle)
#plot(limit(sol.value(pos)),'r--',label="speed limit")
#step(range(N),sol.value(U),'k',label="throttle")
legend(loc="upper left")
plt.show()
pass
# linearization
# A = zeros(states.shape[0], states.shape[0])
# for i in range(f.shape[0]):
# for j in range(states.shape[0]):
# A[i,j] = diff(f[i,0], states[j])
# Alin = A.subs([(theta,0), (omegar,0), (omegal,0)])
# print("A = {}".format(Alin))
# B = zeros(f.shape[0], controls.shape[0])
# for i in range(f.shape[0]):
# for j in range(controls.shape[0]):
# B[i,j] = diff(f[i,0], controls[j])
# print("B = {}".format(B))
# dfdtheta = diff(f, theta)
#print(dfdtheta.doit())
# takeaway: linearization is not helpful, because the linearized system is not stabilizable
# -> alternative: use nonlinear control method
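# numerical sanity check of the takeaway above (a sketch, not part of the original script):
# at the equilibrium x = y = theta = 0, omega_r = omega_l = 0 the Jacobian w.r.t. the state
# vanishes (A = 0), so the controllability matrix [B, A B, A^2 B] has rank(B) = 2 < 3 and the
# uncontrollable direction belongs to the eigenvalue 0, i.e. the linearization is not stabilizable.
import numpy as np
r_w, R_ax, d_off = 0.03, 0.05, 0.02  # same wheel radius, axle radius and offset as above
A_lin = np.zeros((3, 3))  # df/dx evaluated at zero input
B_lin = np.array([[r_w/2.0, r_w/2.0],
                  [r_w*d_off/(2.0*R_ax), -r_w*d_off/(2.0*R_ax)],
                  [r_w/(2.0*R_ax), -r_w/(2.0*R_ax)]])  # df/du evaluated at theta = 0
ctrb = np.hstack([B_lin, A_lin.dot(B_lin), A_lin.dot(A_lin).dot(B_lin)])  # Kalman controllability matrix
print("rank of controllability matrix = {}".format(np.linalg.matrix_rank(ctrb)))  # prints 2 < 3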

View File

@ -0,0 +1 @@
/opt/ros/kinetic/share/catkin/cmake/toplevel.cmake

View File

@ -0,0 +1,202 @@
cmake_minimum_required(VERSION 2.8.3)
project(marker_pos_angle)
## Compile as C++11, supported in ROS Kinetic and newer
# add_compile_options(-std=c++11)
## Find catkin macros and libraries
## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
## is used, also find other catkin packages
find_package(catkin REQUIRED COMPONENTS
roscpp
rospy
std_msgs
message_generation
)
## System dependencies are found with CMake's conventions
# find_package(Boost REQUIRED COMPONENTS system)
## Uncomment this if the package has a setup.py. This macro ensures
## modules and global scripts declared therein get installed
## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
# catkin_python_setup()
################################################
## Declare ROS messages, services and actions ##
################################################
## To declare and build messages, services or actions from within this
## package, follow these steps:
## * Let MSG_DEP_SET be the set of packages whose message types you use in
## your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
## * In the file package.xml:
## * add a build_depend tag for "message_generation"
## * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
## * If MSG_DEP_SET isn't empty the following dependency has been pulled in
## but can be declared for certainty nonetheless:
## * add a exec_depend tag for "message_runtime"
## * In this file (CMakeLists.txt):
## * add "message_generation" and every package in MSG_DEP_SET to
## find_package(catkin REQUIRED COMPONENTS ...)
## * add "message_runtime" and every package in MSG_DEP_SET to
## catkin_package(CATKIN_DEPENDS ...)
## * uncomment the add_*_files sections below as needed
## and list every .msg/.srv/.action file to be processed
## * uncomment the generate_messages entry below
## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
## Generate messages in the 'msg' folder
add_message_files(
FILES
id_pos_angle.msg
# Message1.msg
# Message2.msg
)
## Generate services in the 'srv' folder
# add_service_files(
# FILES
# Service1.srv
# Service2.srv
# )
## Generate actions in the 'action' folder
# add_action_files(
# FILES
# Action1.action
# Action2.action
# )
## Generate added messages and services with any dependencies listed here
generate_messages(
DEPENDENCIES
std_msgs
)
################################################
## Declare ROS dynamic reconfigure parameters ##
################################################
## To declare and build dynamic reconfigure parameters within this
## package, follow these steps:
## * In the file package.xml:
## * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
## * In this file (CMakeLists.txt):
## * add "dynamic_reconfigure" to
## find_package(catkin REQUIRED COMPONENTS ...)
## * uncomment the "generate_dynamic_reconfigure_options" section below
## and list every .cfg file to be processed
## Generate dynamic reconfigure parameters in the 'cfg' folder
# generate_dynamic_reconfigure_options(
# cfg/DynReconf1.cfg
# cfg/DynReconf2.cfg
# )
###################################
## catkin specific configuration ##
###################################
## The catkin_package macro generates cmake config files for your package
## Declare things to be passed to dependent projects
## INCLUDE_DIRS: uncomment this if your package contains header files
## LIBRARIES: libraries you create in this project that dependent projects also need
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
# INCLUDE_DIRS include
# LIBRARIES marker_pos_angle
# CATKIN_DEPENDS roscpp rospy std_msgs
# DEPENDS system_lib
CATKIN_DEPENDS message_runtime
)
###########
## Build ##
###########
## Specify additional locations of header files
## Your package locations should be listed before other locations
include_directories(
# include
${catkin_INCLUDE_DIRS}
)
## Declare a C++ library
# add_library(${PROJECT_NAME}
# src/${PROJECT_NAME}/marker_pos_angle.cpp
# )
## Add cmake target dependencies of the library
## as an example, code may need to be generated before libraries
## either from message generation or dynamic reconfigure
# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Declare a C++ executable
## With catkin_make all packages are built within a single CMake context
## The recommended prefix ensures that target names across packages don't collide
# add_executable(${PROJECT_NAME}_node src/marker_pos_angle_node.cpp)
## Rename C++ executable without prefix
## The above recommended prefix causes long target names, the following renames the
## target back to the shorter version for ease of user use
## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
## Add cmake target dependencies of the executable
## same as for the library above
# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
## Specify libraries to link a library or executable target against
# target_link_libraries(${PROJECT_NAME}_node
# ${catkin_LIBRARIES}
# )
#############
## Install ##
#############
# all install targets should use catkin DESTINATION variables
# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
## Mark executable scripts (Python etc.) for installation
## in contrast to setup.py, you can choose the destination
# install(PROGRAMS
# scripts/my_python_script
# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark executables and/or libraries for installation
# install(TARGETS ${PROJECT_NAME} ${PROJECT_NAME}_node
# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
# )
## Mark cpp header files for installation
# install(DIRECTORY include/${PROJECT_NAME}/
# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
# FILES_MATCHING PATTERN "*.h"
# PATTERN ".svn" EXCLUDE
# )
## Mark other files for installation (e.g. launch and bag files, etc.)
# install(FILES
# # myfile1
# # myfile2
# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
# )
#############
## Testing ##
#############
## Add gtest based cpp test target and link libraries
# catkin_add_gtest(${PROJECT_NAME}-test test/test_marker_pos_angle.cpp)
# if(TARGET ${PROJECT_NAME}-test)
# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
# endif()
## Add folders to be run by python nosetests
# catkin_add_nosetests(test)

View File

@ -0,0 +1,4 @@
int32 id
float32 x
float32 y
float32 angle

View File

@ -0,0 +1,70 @@
<?xml version="1.0"?>
<package format="2">
<name>marker_pos_angle</name>
<version>0.0.0</version>
<description>The marker_pos_angle package</description>
<!-- One maintainer tag required, multiple allowed, one person per tag -->
<!-- Example: -->
<!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
<maintainer email="bt304019@todo.todo">bt304019</maintainer>
<!-- One license tag required, multiple allowed, one license per tag -->
<!-- Commonly used license strings: -->
<!-- BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
<license>TODO</license>
<!-- Url tags are optional, but multiple are allowed, one per tag -->
<!-- Optional attribute type can be: website, bugtracker, or repository -->
<!-- Example: -->
<!-- <url type="website">http://wiki.ros.org/marker_pos_angle</url> -->
<!-- Author tags are optional, multiple are allowed, one per tag -->
<!-- Authors do not have to be maintainers, but could be -->
<!-- Example: -->
<!-- <author email="jane.doe@example.com">Jane Doe</author> -->
<!-- The *depend tags are used to specify dependencies -->
<!-- Dependencies can be catkin packages or system dependencies -->
<!-- Examples: -->
<!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
<!-- <depend>roscpp</depend> -->
<!-- Note that this is equivalent to the following: -->
<!-- <build_depend>roscpp</build_depend> -->
<!-- <exec_depend>roscpp</exec_depend> -->
<!-- Use build_depend for packages you need at compile time: -->
<!-- <build_depend>message_generation</build_depend> -->
<!-- Use build_export_depend for packages you need in order to build against this package: -->
<!-- <build_export_depend>message_generation</build_export_depend> -->
<!-- Use buildtool_depend for build tool packages: -->
<!-- <buildtool_depend>catkin</buildtool_depend> -->
<!-- Use exec_depend for packages you need at runtime: -->
<!-- <exec_depend>message_runtime</exec_depend> -->
<!-- Use test_depend for packages you need only for testing: -->
<!-- <test_depend>gtest</test_depend> -->
<!-- Use doc_depend for packages you need only for building documentation: -->
<!-- <doc_depend>doxygen</doc_depend> -->
<buildtool_depend>catkin</buildtool_depend>
<build_depend>roscpp</build_depend>
<build_depend>rospy</build_depend>
<build_depend>std_msgs</build_depend>
<build_depend>message_generation</build_depend>
<build_export_depend>roscpp</build_export_depend>
<build_export_depend>rospy</build_export_depend>
<build_export_depend>std_msgs</build_export_depend>
<exec_depend>roscpp</exec_depend>
<exec_depend>rospy</exec_depend>
<exec_depend>std_msgs</exec_depend>
<exec_depend>message_runtime</exec_depend>
<!-- The export tag contains other, unspecified, tags -->
<export>
<!-- Other tools can request additional information be placed here -->
</export>
</package>

View File

@ -0,0 +1,38 @@
import sys
import rospy
from marker_pos_angle.msg import id_pos_angle
from fiducial_msgs.msg import FiducialTransformArray
from tf.transformations import euler_from_quaternion
pub = rospy.Publisher('marker_id_pos_angle', id_pos_angle, queue_size=10)
'''
This callback listens to fiducial messages that describe the translation and rotation of markers w.r.t. the
camera and transforms them into a position in the 2D plane and the heading angle of the robot in that plane.
'''
def callback(data):
for d in data.transforms:
id = d.fiducial_id
q = d.transform.rotation
t = d.transform.translation
euler = euler_from_quaternion([q.x, q.y, q.z, q.w])
m = id_pos_angle()
m.id = id
m.x = t.x
m.y = -t.y
m.angle = -euler[2]
pub.publish(m)
def main(args):
rospy.init_node('euler_quaternion_node', anonymous=True)
quaternion_sub = rospy.Subscriber("/fiducial_transforms", FiducialTransformArray, callback)
rospy.spin()
if __name__ == '__main__':
main(sys.argv)

View File

@ -7,33 +7,35 @@ pygame.display.set_mode((640, 480))
rc_socket = socket.socket()
try:
rc_socket.connect(('192.168.4.1', 1234)) # connect to robot
+#rc_socket.connect(('192.168.1.101', 1234)) # connect to robot
except socket.error:
print("could not connect to socket")
while True:
+u1 = 0
+u2 = 0
+vmax = 0.5
events = pygame.event.get()
for event in events:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
-u1 = 1.0
-u2 = -1.0
+u1 = vmax
+u2 = -vmax
print("turn left: ({},{})".format(u1, u2))
elif event.key == pygame.K_RIGHT:
-u1 = -1.0
-u2 = 1.0
+u1 = -vmax
+u2 = vmax
print("turn right: ({},{})".format(u1, u2))
elif event.key == pygame.K_UP:
-u1 = -1.0
-u2 = -1.0
+u1 = -vmax
+u2 = -vmax
print("forward: ({},{})".format(u1, u2))
elif event.key == pygame.K_DOWN:
-u1 = 1.0
-u2 = 1.0
+u1 = vmax
+u2 = vmax
print("forward: ({},{})".format(u1, u2))
rc_socket.send('({},{})\n'.format(u1, u2))
elif event.type == pygame.KEYUP:
-u1 = 0
-u2 = 0
print("key released, resetting: ({},{})".format(u1, u2))
rc_socket.send('({},{})\n'.format(u1, u2))

View File

@ -1,5 +1,6 @@
# startup: # startup:
# roscore # roscore
# rosparam set cv_camera/device_id 0
# rosrun cv_camera cv_camera_node # rosrun cv_camera cv_camera_node
import sys import sys
@ -9,10 +10,20 @@ import numpy as np
import cv2 import cv2
import cv2.aruco as aruco import cv2.aruco as aruco
import socket import socket
import scipy.integrate
import threading
from copy import deepcopy
import matplotlib.pyplot as plt
import matplotlib.animation as anim
import time
from sensor_msgs.msg import Image from sensor_msgs.msg import Image
from sensor_msgs.msg import CompressedImage from sensor_msgs.msg import CompressedImage
from cv_bridge import CvBridge, CvBridgeError from cv_bridge import CvBridge, CvBridgeError
import math
pygame.init() pygame.init()
pygame.font.init() pygame.font.init()
@ -20,8 +31,8 @@ pygame.font.init()
myfont = pygame.font.SysFont('Comic Sans MS', 30) myfont = pygame.font.SysFont('Comic Sans MS', 30)
pygame.display.set_caption("ROS camera stream on Pygame") pygame.display.set_caption("ROS camera stream on Pygame")
screenheight = 480 screenheight = 1024
screenwidth = 640 #4*screenheight//3 screenwidth = 1280 #4*screenheight//3
screen = pygame.display.set_mode([screenwidth, screenheight]) screen = pygame.display.set_mode([screenwidth, screenheight])
red = (255, 0, 0) red = (255, 0, 0)
teal = (0, 255, 255) teal = (0, 255, 255)
@ -31,33 +42,93 @@ camera_stream = "/cv_camera/image_raw"
#camera_stream = "/image_raw" #camera_stream = "/image_raw"
compression = False compression = False
# taken from https://www.learnopencv.com/rotation-matrix-to-euler-angles/
# Checks if a matrix is a valid rotation matrix.
def isRotationMatrix(R):
Rt = np.transpose(R)
shouldBeIdentity = np.dot(Rt, R)
I = np.identity(3, dtype=R.dtype)
n = np.linalg.norm(I - shouldBeIdentity)
return n < 1e-6
# Calculates rotation matrix to euler angles
# The result is the same as MATLAB except the order
# of the euler angles ( x and z are swapped ).
def rotationMatrixToEulerAngles(R):
assert (isRotationMatrix(R))
sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
singular = sy < 1e-6
if not singular:
x = math.atan2(R[2, 1], R[2, 2])
y = math.atan2(-R[2, 0], sy)
z = math.atan2(R[1, 0], R[0, 0])
else:
x = math.atan2(-R[1, 2], R[1, 1])
y = math.atan2(-R[2, 0], sy)
z = 0
return np.array([x, y, z])
class Robot: class Robot:
def __init__(self, id, ip=None): def __init__(self, id, ip=None):
self.pos = None self.pos = None
self.orient = None
self.id = id self.id = id
self.ip = None self.pos = None
self.euler = None
self.ip = ip
def f_ode(t, x, u):
# dynamical model of the two-wheeled robot
# TODO: find exact values for these parameters
r = 0.03
R = 0.05
d = 0.02
theta = x[2]
omega_r = u[0]
omega_l = u[1]
dx = np.zeros(3)
dx[0] = (r/2.0 * np.cos(theta) - r*d/(2.0*R) * np.sin(theta)) * omega_r \
+ (r/2.0 * np.cos(theta) + r*d/(2.0 * R) * np.sin(theta)) * omega_l
dx[1] = (r/2.0 * np.sin(theta) + r*d/(2.0*R) * np.cos(theta)) * omega_r \
+ (r/2 * np.sin(theta) - r*d/(2.0*R) * np.cos(theta)) * omega_l
dx[2] = -r/(2.0*R) * (omega_r - omega_l)
return dx
class RemoteController: class RemoteController:
def __init__(self): def __init__(self):
self.cam = cv2.VideoCapture(2) #self.cam = cv2.VideoCapture(1)
#self.image_pub = rospy.Publisher("pygame_image", Image) #self.image_pub = rospy.Publisher("pygame_image", Image)
#self.bridge = CvBridge() self.bridge = CvBridge()
#if compression:
# self.image_sub = rospy.Subscriber(camera_stream + "/compressed", CompressedImage, self.callback)
#else:
# self.image_sub = rospy.Subscriber(camera_stream, Image, self.callback)
#self.cv_image = np.zeros((1, 1, 3), np.uint8) #self.cv_image = np.zeros((1, 1, 3), np.uint8)
self.cv_image = None
self.robots = [Robot(1), Robot(2), Robot(512)] self.robots = [Robot(2), Robot(6), Robot(7), Robot(8), Robot(9)]
self.robot_ids = [r.id for r in self.robots]
screen.fill([0, 0, 0]) screen.fill([0, 0, 0])
cv_file = cv2.FileStorage("test.yaml", cv2.FILE_STORAGE_READ)
self.camera_matrix = cv_file.getNode("camera_matrix").mat()
self.dist_matrix = cv_file.getNode("dist_coeff").mat()
self.aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL) self.aruco_dict = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)
self.parameters = aruco.DetectorParameters_create() self.parameters = aruco.DetectorParameters_create()
@ -68,14 +139,99 @@ class RemoteController:
except socket.error: except socket.error:
print("could not connect to socket") print("could not connect to socket")
def callback(self, data): self.mutex = threading.Lock()
try:
self.k = 0
self.ii = 0.1
self.pp = 0.4
self.inc = 0.0
self.alphas = []
self.speed = 1.0
self.controlling = False
self.u1 = 0.0
self.u2 = 0.0
self.t = time.time()
self.x0 = np.zeros(3)
self.ts = np.array([0.0])
self.xs = np.array([[0.0, 0.0, 0.0]])
self.ys = []
self.omegas = []
self.tms_0 = None
self.tms = None #np.array([0.0])
self.xm_0 = None
self.xms = None #np.array([[0.0, 0.0, 0.0]])
self.alpha_0 = None
self.alphas = []
self.pos_0 = None
self.possx = []
self.possy = []
if compression: if compression:
self.cv_image = self.bridge.compressed_imgmsg_to_cv2(data) self.image_sub = rospy.Subscriber(camera_stream + "/compressed", CompressedImage, self.callback)
else: else:
self.cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8") self.image_sub = rospy.Subscriber(camera_stream, Image, self.callback)
except CvBridgeError as e:
print(e) self.fig = plt.figure()
self.ani = anim.FuncAnimation(self.fig, init_func=self.ani_init, func=self.ani_update, interval=10, blit=True)
self.ax = self.fig.add_subplot(1,1,1)
self.xdata, self.ydata = [], []
self.line, = self.ax.plot([],[])
self.line_sim, = self.ax.plot([], [])
self.dir, = self.ax.plot([], [])
plt.xlabel('x-position')
plt.ylabel('y-position')
def ani_init(self):
self.ax.set_xlim(-2, 2)
self.ax.set_ylim(-2, 2)
self.ax.set_aspect('equal', adjustable='box')
return self.line, self.line_sim, self.dir,
def ani_update(self, frame):
self.mutex.acquire()
try:
# copy data for plot from global arrays
if self.tms is not None:
tm_local = deepcopy(self.tms)
xm_local = deepcopy(self.xms)
#print(len(tm_local))
if len(tm_local) > 0:
self.line.set_data(xm_local[:,0], xm_local[:,1])
a = xm_local[-1, 0]
b = xm_local[-1, 0]
a2 = a + np.cos(xm_local[-1, 2]) * 1.0
b2 = b + np.sin(xm_local[-1, 2]) * 1.0
self.dir.set_data(np.array([a, a2]), np.array([b, b2]))
ts_local = deepcopy(self.ts)
xs_local = deepcopy(self.xs)
if len(ts_local) > 0:
self.line_sim.set_data(xs_local[:,0], xs_local[:,1])
finally:
self.mutex.release()
return self.line, self.line_sim, self.dir,
def callback(self, data):
#self.cv_image_small = np.fliplr(self.cv_image_small) # why is this necessary? #self.cv_image_small = np.fliplr(self.cv_image_small) # why is this necessary?
# marker detection # marker detection
@ -84,29 +240,76 @@ class RemoteController:
#print("robot {} pos = {}".format(r.id, r.pos)) #print("robot {} pos = {}".format(r.id, r.pos))
def capture(self): #ret_val, self.cv_image = self.cam.read()
ret_val, self.cv_image = self.cam.read() try:
if compression:
self.cv_image = self.bridge.compressed_imgmsg_to_cv2(data)
else:
self.cv_image = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
corners, ids, rejectedImgPoints = aruco.detectMarkers(self.cv_image, self.aruco_dict, parameters=self.parameters) corners, ids, rejectedImgPoints = aruco.detectMarkers(self.cv_image, self.aruco_dict, parameters=self.parameters)
rvec, tvec, _ = aruco.estimatePoseSingleMarkers(corners[0], 0.05, mtx, dist)
marker_found = len(corners) > 0 marker_found = len(corners) > 0
if marker_found: if marker_found:
for i in range(len(corners)): markers = zip(corners, ids)
#print("id = {}".format(ids[i])) #print("found!")
pos = np.mean(corners[i][0], axis=0) # filter markers with unknown ids
#print("detected markers = {}".format(markers))
markers_filtered = list(filter(lambda x: x[1] in self.robot_ids, markers))
#print("filtered markers = {}".format(markers_filtered))
if len(markers_filtered) > 0:
filtered_corners, filtered_ids = zip(*markers_filtered)
#print("filtered corners = {}".format(filtered_corners[0]))
rvec, tvec, _ = aruco.estimatePoseSingleMarkers(filtered_corners, 0.1, self.camera_matrix,
self.dist_matrix)
aruco.drawDetectedMarkers(self.cv_image, filtered_corners)
for i in range(len(filtered_corners)):
aruco.drawAxis(self.cv_image, self.camera_matrix, self.dist_matrix, rvec[i], tvec[i],
0.1)
for r in self.robots: for r in self.robots:
if r.id == ids[i]: if r.id == filtered_ids[i]:
pos_flipped = (self.cv_image.shape[1] - int(pos[0]), int(pos[1])) r.pos = tvec[i][0] # only x and y component are important for us
r.pos = pos_flipped r.orient = rvec[i][0]
r.rot_mat, r.jacobian = cv2.Rodrigues(r.orient)
r.euler = rotationMatrixToEulerAngles(r.rot_mat)
# save measured position and angle for plotting
measurement = np.array([-r.pos[0], -r.pos[1], r.euler[2] + np.pi/4.0])
if self.xm_0 is None:
self.tms_0 = time.time()
self.xm_0 = deepcopy(measurement)
self.xm_0[2] = 0.0
self.tms = np.array([self.tms_0])
self.xms = measurement - self.xm_0
else:
self.mutex.acquire()
try:
self.tms = np.vstack((self.tms, time.time() - self.tms_0))
self.xms = np.vstack((self.xms, measurement - self.xm_0))
if len(self.tms) == 50:
self.alpha_0 = np.mean(self.xms[:,2])
finally:
self.mutex.release()
def show_display(self): def show_display(self):
while self.alpha_0 is None:
pass
self.x0[2] = self.alpha_0
print("alpha_0 = {}".format(self.alpha_0))
print("show_display")
while True: while True:
self.capture() #self.capture()
# show ros camera image on the pygame screen # show ros camera image on the pygame screen
#image = cv2.resize(self.cv_image,(screenwidth,screenheight)) if self.cv_image is not None:
image = cv2.resize(self.cv_image,(screenwidth,screenheight))
frame = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2RGB) frame = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2RGB)
frame = np.rot90(frame) frame = np.rot90(frame)
frame = pygame.surfarray.make_surface(frame) frame = pygame.surfarray.make_surface(frame)
@ -114,37 +317,170 @@ class RemoteController:
# plot robot positions # plot robot positions
for r in self.robots: for r in self.robots:
if r.pos is not None: if r.euler is not None:
#print("r.pos = {}".format(r.pos))
#print("r.euler = {}".format(r.euler[2]))
#print("drawing at {}".format(r.pos)) #print("drawing at {}".format(r.pos))
pygame.draw.circle(screen, (255, 0, 0), r.pos, 10) #pygame.draw.circle(screen, (255, 0, 0), r.pos, 10)
pass
pygame.display.update() pygame.display.update()
keyboard_control = True
keyboard_control_speed_test = False
pid = False
if keyboard_control:
events = pygame.event.get()
speed = 1.0
for event in events:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
self.u1 = speed
self.u2 = -speed
#print("turn left: ({},{})".format(u1, u2))
elif event.key == pygame.K_RIGHT:
self.u1 = -speed
self.u2 = speed
#print("turn right: ({},{})".format(u1, u2))
elif event.key == pygame.K_UP:
self.u1 = -speed
self.u2 = -speed
print("forward: ({},{})".format(self.u1, self.u2))
elif event.key == pygame.K_DOWN:
self.u1 = speed
self.u2 = speed
#print("forward: ({},{})".format(u1, u2))
self.rc_socket.send('({},{})\n'.format(self.u1, self.u2))
elif event.type == pygame.KEYUP:
self.u1 = 0
self.u2 = 0
#print("key released, resetting: ({},{})".format(u1, u2))
self.rc_socket.send('({},{})\n'.format(self.u1, self.u2))
tnew = time.time()
dt = tnew - self.t
r = scipy.integrate.ode(f_ode)
r.set_f_params(np.array([self.u1 * 13.32, self.u2 * 13.32]))
x = self.x0
r.set_initial_value(x, self.t)
xnew = r.integrate(r.t + dt)
self.t = tnew
self.x0 = xnew
self.mutex.acquire()
try:
self.ts = np.vstack((self.ts, tnew))
self.xs = np.vstack((self.xs, xnew))
#self.ys.append(xnew[1])
#self.omegas.append(xnew[2])
finally:
self.mutex.release()
# for r in self.robots:
# if r.euler is not None:
# if self.alpha_0 is not None:
# self.alphas.append(r.euler[2]-self.alpha_0)
# else:
# self.alpha_0 = r.euler[2]
# self.alphas.append(0.0)
# if r.pos is not None:
# if self.pos_0 is not None:
# self.possx.append(r.pos[0] - self.pos_0[0])
# self.possy.append(r.pos[1] - self.pos_0[1])
# else:
# self.pos_0 = r.pos[0:2]
# self.possx.append(0.0)
# self.possy.append(0.0)
#print("(t,x,u) = ({},{},{})".format(tnew,xnew,[self.u1, self.u2]))
#time.sleep(0.1)
elif keyboard_control_speed_test:
events = pygame.event.get() events = pygame.event.get()
for event in events: for event in events:
if event.type == pygame.KEYDOWN: if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT: if event.key == pygame.K_LEFT:
u1 = 1.0 self.speed = self.speed / np.sqrt(np.sqrt(np.sqrt(10.0)))
u2 = -1.0
#print("turn left: ({},{})".format(u1, u2))
elif event.key == pygame.K_RIGHT: elif event.key == pygame.K_RIGHT:
u1 = -1.0 self.speed = self.speed * np.sqrt(np.sqrt(np.sqrt(10.0)))
u2 = 1.0
#print("turn right: ({},{})".format(u1, u2))
elif event.key == pygame.K_UP: elif event.key == pygame.K_UP:
u1 = -1.0 u1 = self.speed
u2 = -1.0 u2 = -self.speed
#print("forward: ({},{})".format(u1, u2))
elif event.key == pygame.K_DOWN: elif event.key == pygame.K_DOWN:
u1 = 1.0 u1 = 0.0
u2 = 1.0 u2 = 0.0
#print("forward: ({},{})".format(u1, u2)) print("speed = {}".format(self.speed))
self.rc_socket.send('({},{})\n'.format(u1, u2)) self.rc_socket.send('({},{})\n'.format(u1, u2))
elif event.type == pygame.KEYUP:
u1 = 0 elif pid:
u2 = 0 events = pygame.event.get()
#print("key released, resetting: ({},{})".format(u1, u2)) for event in events:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
self.ii = self.ii / np.sqrt(np.sqrt(np.sqrt(10.0)))
print("ii = {}".format(self.pp))
elif event.key == pygame.K_RIGHT:
self.ii = self.ii * np.sqrt(np.sqrt(np.sqrt(10.0)))
print("ii = {}".format(self.pp))
elif event.key == pygame.K_UP:
self.controlling = True
elif event.key == pygame.K_DOWN:
self.controlling = False
self.rc_socket.send('({},{})\n'.format(0, 0))
dt = 0.1
#i = 0.0 # 0.001
if self.controlling:
# test: turn robot such that angle is zero
for r in self.robots:
if r.euler is not None:
self.k = self.k + 1
alpha = r.euler[2]
self.alphas.append(alpha)
e = alpha - 0
p = self.pp * e
self.inc += e * dt
d = 0.0
u1 = p + self.ii * self.inc + d
u2 = - p - self.ii * self.inc - d
print("alpha = {}, u = ({}, {})".format(alpha, u1, u2))
self.rc_socket.send('({},{})\n'.format(u1, u2)) self.rc_socket.send('({},{})\n'.format(u1, u2))
time.sleep(dt)
# elif self.k == kmax:
# u1 = u2 = 0.0
# self.rc_socket.send('({},{})\n'.format(u1, u2))
# self.k = self.k + 1
#
# plt.plot(np.array(self.alphas))
# plt.show()
pass
def calibration_sequence(self):
speed = 1.0
u1 = speed
u2 = speed
self.rc_socket.send('({},{})\n'.format(u1, u2))
time.sleep(4.0)
u1 = u2 = 0
self.rc_socket.send('({},{})\n'.format(u1, u2))
self.rc_socket.send('\n')
# test:
# -> 1.6 m in 4 seconds
# angular velocity: angle/second
# 1.6 / (2 * pi * 0.03)
# l = 1.6
# r = 0.03
# t = 4.0
# -> number of rotations n = l / (2 * pi * r)
# -> angular velocity = 2 * pi * n / t = l / (r * t)
# result: maximum angular velocity: omega_max = 13.32 rad/sec
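# the same computation written out as a short sketch (using the numbers above):
# l, r, t = 1.6, 0.03, 4.0
# n = l / (2 * math.pi * r)        # ~8.49 wheel revolutions in 4 s
# omega_max = 2 * math.pi * n / t  # = l / (r * t) = 1.6 / (0.03 * 4.0) ~ 13.33 rad/s
# this is (approximately) the 13.32 rad/s scaling factor applied to u1, u2 in show_display()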
def main(args): def main(args):
@ -155,7 +491,16 @@ def main(args):
pygame.init() pygame.init()
pygame.display.set_mode((640, 480)) pygame.display.set_mode((640, 480))
rc.show_display() #p = multiprocessing.Process(target=rc.show_display)
threading.Thread(target=rc.show_display).start()
#p.start()
plt.ion()
plt.pause(0.01)
#p.join()
pass
#rc.show_display()
#rc.calibration_sequence()
#game.loop() #game.loop()
# try: # try: