version: "3.9"

# First network configuration. Change this to the network interface you want to
# use primarily. (A better configuration for multiple network interfaces is still
# needed.)
networks:
  # rmp:
  #   driver: macvlan
  #   driver_opts:
  #     parent: eno1 # robot network interface
  #   ipam:
  #     config:
  #       - subnet: 192.168.0.0/24
  #         gateway: 192.168.0.1
  #         ip_range: 192.168.0.200/25
  #         aux_addresses:
  #           net-address: 192.168.0.100 # excludes this address from being assigned by Docker
  # Make a new bridge network for ROS
  rmp:
    driver: bridge
    # ipam:
    #   config:
    #     - subnet:

# Add this mount point to all services: - ./customize/entrypoint.sh:/entrypoint.sh
# Attention: child services inherit settings from their parents. If you set
# network_mode: host in the base service, all child services will also use host
# networking. This is not always what you want, so be careful with it.
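
# A sketch of the two points above (the x-common-volumes name is an assumption and
# is not referenced by the services below): a reusable mount list via a YAML anchor,
# and a child service explicitly overriding a setting inherited through extends.
#
#   x-common-volumes: &common-volumes
#     - ./customize/entrypoint.sh:/entrypoint.sh
#
#   services:
#     some_child:
#       extends: base
#       volumes: *common-volumes
#       network_mode: bridge # overrides the host networking inherited from base
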
services:
  # Base image containing dependencies.
  base:
    image: ghcr.io/bjoernellens1/ros2_rmp/rmp:base
    build:
      context: .
      dockerfile: docker/Dockerfile
      tags:
        - ghcr.io/bjoernellens1/ros2_rmp/rmp:base
      args:
        ROS_DISTRO: humble
      target: base
      x-bake:
        platforms:
          - linux/arm64
          - linux/amd64
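    # The x-bake section above is read by `docker buildx bake`, which can use this
    # compose file as its build definition. A usage sketch (assumes buildx and
    # QEMU binfmt emulation are set up for cross-building):
    #   docker buildx bake -f docker-compose.yaml base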
    # Interactive shell
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2
    network_mode: host
    ipc: host
    # Needed to display graphical applications
    environment:
      # Allows graphical programs in the container.
      - DISPLAY=${DISPLAY}
      - QT_X11_NO_MITSHM=1
      - NVIDIA_DRIVER_CAPABILITIES=all
      # Set the correct ROS 2 parameters: domain ID and RMW implementation.
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
      - CYCLONEDDS_URI=file:///cyclonedds.xml
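      # A minimal sketch of the mounted cyclonedds.xml (assumed content; element
      # names differ between CycloneDDS versions, so check your version's docs):
      #   <CycloneDDS>
      #     <Domain id="any">
      #       <General>
      #         <Interfaces><NetworkInterface name="eno1"/></Interfaces>
      #         <AllowMulticast>true</AllowMulticast>
      #       </General>
      #     </Domain>
      #   </CycloneDDS>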
    volumes:
      # Allows graphical programs in the container.
      - /tmp/.X11-unix:/tmp/.X11-unix:rw
      - ${XAUTHORITY:-$HOME/.Xauthority}:/root/.Xauthority
      - ./config/entrypoint.sh:/entrypoint.sh
      - ./config/cyclonedds.xml:/cyclonedds.xml
    restart: unless-stopped
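    # Note: for X11 apps to display, the host usually also has to allow local
    # connections to the X server once per session (assumption; depends on your
    # X setup):
    #   xhost +local:root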

    # networks: # Not using bridging anymore; instead, try running all services on
    # # the macvlan. Should work for now. Bridging definitely does not work here,
    # # because multicast is not supported on a bridge network.
    #   rmp:
    #     # ipv4_address: 192.168.0.101 # No need to set IPs manually; docker-compose
    #     # assigns them automatically, and they are inherited by all child services.

  # Overlay image containing the project-specific source code.
  overlay:
    extends: base
    image: ghcr.io/bjoernellens1/ros2_rmp/rmp:overlay
    build:
      context: .
      dockerfile: docker/Dockerfile
      tags:
        - ghcr.io/bjoernellens1/ros2_rmp/rmp:overlay
      target: overlay
      x-bake:
        platforms:
          - linux/arm64
          - linux/amd64
    volumes:
      - .:/repo
    command: >
      /bin/bash

  # Additional dependencies for GUI applications
  guis:
    extends: overlay
    image: ghcr.io/bjoernellens1/ros2_rmp/rmp:guis
    build:
      context: .
      dockerfile: docker/Dockerfile
      tags:
        - ghcr.io/bjoernellens1/ros2_rmp/rmp:guis
      target: guis
      x-bake:
        platforms:
          - linux/arm64
          - linux/amd64
    command: >
      /bin/bash
    devices:
      - /dev/dri:/dev/dri

  # Robot State Publisher
  rsp:
    extends: overlay
    command: >
      ros2 launch cps_rmp220_support rsp.launch.py

  # Controller
  controller:
    extends: base
    command: >
      ros2 run segwayrmp SmartCar --ros-args -r cmd_vel:=cmd_vel_out -p serial_full_name:=/dev/segway
    devices:
      - /dev/segway:/dev/ttyUSB0
      #- /dev/ttyUSB0:/dev/ttyUSB0
    privileged: true

  # Teleop
  teleop:
    extends: base
    depends_on:
      - controller
    command: >
      ros2 launch rmp220_teleop robot_joystick.launch.py
    devices:
      - /dev/input/js0:/dev/input/js0
      #- /dev/input/by-id/usb-Logitech_Wireless_Gamepad_F710_56679674-joystick:/dev/input/by-id/usb-Logitech_Wireless_Gamepad_F710_56679674-joystick
    privileged: true

  # Lidar
  lidar:
    extends: overlay
    depends_on:
      - lidar_filter
    command: >
      ros2 launch cps_rmp220_support robot_lidar.launch.py serial_port:=/dev/rplidarA1
    devices:
      - /dev/rplidarA1:/dev/rplidarA1 # udev rules are needed for this to work:
      # SUBSYSTEM=="tty", ATTRS{serial}=="0001", SYMLINK+="segway"
      # SUBSYSTEM=="tty", ATTRS{serial}=="3453995662b3af4f81f4a69eba5f3f29", SYMLINK+="rplidarA1"

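  # Sketch for installing the udev rules quoted above on the host (the file name
  # is an assumption), then reloading them:
  #   sudo cp 99-rmp.rules /etc/udev/rules.d/
  #   sudo udevadm control --reload-rules && sudo udevadm trigger
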
  # Lidar filtering node.
  lidar_filter:
    extends: overlay
    command: >
      ros2 launch cps_rmp220_support robot_scan_filter.launch.py

  # Localization by EKF node
  ekf:
    extends: overlay
    depends_on:
      - controller
      - rsp
    command: >
      ros2 launch cps_rmp220_support robot_localization.launch.py

  # Mapping
  mapping:
    extends: overlay
    depends_on:
      - ekf
      - rsp
      - lidar
    command: >
      ros2 launch cps_rmp220_support robot_mapping.launch.py

  # slam-toolbox localization
  localization:
    extends: overlay
    depends_on:
      - ekf
      - rsp
      - lidar
    command: >
      ros2 launch cps_rmp220_support robot_mapping_localization.launch.py map_file_name:=/repo/maps/map.yaml

  # AMCL localization
  amcl:
    extends: overlay
    depends_on:
      - ekf
      - rsp
      - lidar
    command: >
      ros2 launch cps_rmp220_support robot_amcl.launch.py map:=/repo/maps/map.yaml

  # Navigation
  navigation:
    extends: overlay
    depends_on:
      - controller
      - teleop
      - rsp
      - lidar
      - ekf
      - oakd
    command: >
      ros2 launch nav2_bringup bringup_launch.py slam:=False map:=/repo/maps/map_01-26-24.yaml use_sim_time:=False use_composition:=True params_file:=/repo/config/nav2_params.yaml
    # alternative map: maps/map_openlabday.yaml

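  # Usage sketch (assumes the map exists and the stack is up): send a navigation
  # goal from any shell that shares ROS_DOMAIN_ID=5, e.g.
  #   ros2 action send_goal /navigate_to_pose nav2_msgs/action/NavigateToPose \
  #     "{pose: {header: {frame_id: map}, pose: {position: {x: 1.0, y: 0.0, z: 0.0}, orientation: {w: 1.0}}}}"
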
  # bash
  bash:
    extends: overlay
    command: >
      /bin/bash

  # rviz2
  rviz2:
    #extends: guis
    image: ghcr.io/bjoernellens1/ros2_rmp/rmp:guis
    command: >
      ros2 launch cps_rmp220_support rviz.launch.py
    # Needed to display graphical applications
    privileged: true # really necessary?
    environment:
      # Allows graphical programs in the container.
      - DISPLAY=${DISPLAY}
      - QT_X11_NO_MITSHM=1
      - NVIDIA_DRIVER_CAPABILITIES=all
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
    volumes:
      # Allows graphical programs in the container.
      - /tmp/.X11-unix:/tmp/.X11-unix:rw
      - ${XAUTHORITY:-$HOME/.Xauthority}:/root/.Xauthority

  # Foxglove Studio Bridge
  foxglove_bridge:
    extends: overlay
    command: >
      ros2 launch foxglove_bridge foxglove_bridge_launch.xml port:=8765
    # To enable TLS, append: tls:=true certfile:=/certs/foxgloveCert.crt keyfile:=/certs/foxgloveKey.key
    volumes:
      - /opt/cps/certs:/certs

  # Foxglove Studio Webserver
  foxglove:
    image: ghcr.io/foxglove/studio:latest
    stdin_open: true
    tty: true
    # Networking
    network_mode: bridge
    ports:
      - 8080:8080
    depends_on:
      - foxglove_bridge
    volumes:
      - ./foxglove/default.json:/foxglove/default-layout.json

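  # Usage sketch: browse to http://<robot-ip>:8080 for the Studio web app, then
  # open a connection to ws://<robot-ip>:8765 (the foxglove_bridge WebSocket) to
  # see live topics.
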
  # USB Camera Stream
  cam:
    extends: overlay
    command: >
      ros2 run ros2_cam_openCV cam_node
    devices:
      - /dev/video0:/dev/video0

  # ROS 2 frontier exploration
  explorer:
    extends: overlay
    depends_on:
      - controller
      - teleop
      - rsp
      - lidar
      - ekf
      - navigation
    command: >
      ros2 launch cps_rmp220_support robot_exploration.launch.py

  ### Images for ROS 1 interactions

  # ROS 1 bridge
  ros1bridge:
    image: ghcr.io/bjoernellens1/ros2_rmp/ros1bridge
    command: >
      ros2 run ros1_bridge dynamic_bridge --bridge-all-2to1-topics
    build:
      context: .
      dockerfile: docker/Dockerfile
      tags:
        - ghcr.io/bjoernellens1/ros2_rmp/ros1bridge
      args:
        ROS_DISTRO: humble
      target: bridge
      x-bake:
        platforms:
          #- linux/arm64
          - linux/amd64
    # Networking and IPC for ROS 2
    network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
      - ROS_MASTER_URI=http://localhost:11311 # configured for roscore running on the robot; point this at a local ROS 1 machine instead if needed

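  # Usage sketch (assumes a ROS 1 master per ROS_MASTER_URI): start roscore first,
  # then the bridge, and verify from the ROS 1 side that ROS 2 topics show up:
  #   docker compose up -d roscore ros1bridge
  #   rostopic list
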
  # ROS 1 roscore
  roscore:
    extends: ros1bridge
    command: >
      roscore
    network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
      - ROS_MASTER_URI=http://localhost:11311 # configured for roscore running on the robot; point this at a local ROS 1 machine instead if needed

  ## Configure on ROS 1 hosts
  # seggy 192.168.0.100
  #   locally running ROS package: control1
  #   subscribes to topic2
  #   publishes topic1

  # robot2 192.168.x.x
  #   locally running ROS package: control2
  #   subscribes to topic1
  #   publishes topic2

  # Since we need a single ROS master to coordinate the communication, we choose
  # seggy (192.168.0.100) as master. Therefore we execute locally on robot 1:

  #   export ROS_MASTER_URI=http://192.168.0.100:11311 # or localhost?
  #   export ROS_HOSTNAME=192.168.0.100
  #   export ROS_IP=192.168.0.100
  #   roscore

  # To connect to the ROS master, we execute locally on robot2:

  #   export ROS_MASTER_URI=http://192.168.0.100:11311
  #   export ROS_IP=192.168.0.?
  #   export ROS_HOSTNAME=192.168.0.?

  # ROS 2 OAK-D Lite camera
  oakd:
    extends: overlay
    command: >
      ros2 launch depthai_examples stereo.launch.py
    #devices:
    #  - /dev/oakd-lite:/dev/oakd-lite # needs a matching udev rule to work:
    #    # SUBSYSTEM=="usb", ATTRS{idVendor}=="03e7", MODE="0666", SYMLINK+="oakd-lite"
    #  - /dev/:/dev/
    # Character-device major 189 is the USB bus, so this rule grants the container
    # read/write/mknod access to (hot-plugged) USB devices:
    device_cgroup_rules:
      - 'c 189:* rmw'
    volumes:
      - /dev/bus/usb:/dev/bus/usb

  # For testing the OAK-D Lite camera -> works now with the cgroup rules
  depthai:
    image: luxonis/depthai:latest
    command: >
      python3 /depthai/depthai_demo.py
    stdin_open: true
    tty: true
    device_cgroup_rules:
      - 'c 189:* rmw'
    volumes:
      - /dev/bus/usb:/dev/bus/usb
    environment:
      - DISPLAY=${DISPLAY}
      - QT_X11_NO_MITSHM=1
      - NVIDIA_DRIVER_CAPABILITIES=all

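  # Usage sketch (assumes an OAK camera is plugged into USB and X forwarding works):
  #   docker compose run --rm depthai
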
  zerotier:
    image: "zyclonite/zerotier:router"
    container_name: zerotier-one
    devices:
      - /dev/net/tun
    network_mode: host
    volumes:
      - '/var/lib/zerotier-one:/var/lib/zerotier-one'
    cap_add:
      - NET_ADMIN
      - SYS_ADMIN
      - NET_RAW
    restart: unless-stopped
    environment:
      - TZ=Europe/Amsterdam
      - PUID=1000
      - PGID=1000
      - ZEROTIER_ONE_LOCAL_PHYS=enp5s0 wlx00e04c5513fc # change to your actual interfaces
      - ZEROTIER_ONE_USE_IPTABLES_NFT=false
      - ZEROTIER_ONE_GATEWAY_MODE=inbound # might change to both directions
      #- ZEROTIER_ONE_NETWORK_IDS= # does not really do much?
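
  # Usage sketch: once the container is running, join your ZeroTier network (no
  # network ID is configured above; substitute your own):
  #   docker exec zerotier-one zerotier-cli join <network-id>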