# ros2_rmp/docker-compose.yaml
version: "3.9"
services:
# Base image containing dependencies.
base:
image: ghcr.io/bjoernellens1/ros2_rmp/rmp:base
build:
context: .
dockerfile: docker/Dockerfile
tags:
- ghcr.io/bjoernellens1/ros2_rmp/rmp:base
args:
ROS_DISTRO: humble
target: base
x-bake:
platforms:
- linux/arm64
- linux/amd64
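    # The x-bake platform hints above are consumed by `docker buildx bake`, which reads
    # this compose file. A multi-arch build of this target might look like (a sketch;
    # pushing to the registry is assumed): docker buildx bake base --push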
# Interactive shell
stdin_open: true
tty: true
# Networking and IPC for ROS 2
network_mode: host
ipc: host
# Needed to display graphical applications
environment:
# Allows graphical programs in the container.
- DISPLAY=${DISPLAY}
- QT_X11_NO_MITSHM=1
- NVIDIA_DRIVER_CAPABILITIES=all
      # Set ROS 2 parameters: domain ID and RMW implementation
- ROS_DOMAIN_ID=5
- RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
- CYCLONEDDS_URI=file:///cyclonedds.xml
volumes:
# Allows graphical programs in the container.
- /tmp/.X11-unix:/tmp/.X11-unix:rw
- ${XAUTHORITY:-$HOME/.Xauthority}:/root/.Xauthority
- ./config/entrypoint.sh:/entrypoint.sh
- ./config/cyclonedds.xml:/cyclonedds.xml
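      # The mounted cyclonedds.xml is typically used to pin CycloneDDS to one network
      # interface. A minimal sketch (the interface name eth0 is an assumption):
      #   <CycloneDDS xmlns="https://cdds.io/config">
      #     <Domain Id="any">
      #       <General>
      #         <Interfaces><NetworkInterface name="eth0"/></Interfaces>
      #       </General>
      #     </Domain>
      #   </CycloneDDS>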
    # networks: # Not using bridging anymore; may try running all services on macvlan instead. Host networking should work for now. Bridging definitely does not work because it lacks multicast support.
    #   rmp:
    #     #ipv4_address: 192.168.0.101 # No need to set IPs; docker-compose assigns them automatically. Should be inherited by all child services.
  # Overlay image containing the project-specific source code.
overlay:
extends: base
image: ghcr.io/bjoernellens1/ros2_rmp/rmp:overlay
build:
context: .
dockerfile: docker/Dockerfile
tags:
- ghcr.io/bjoernellens1/ros2_rmp/rmp:overlay
target: overlay
x-bake:
platforms:
- linux/arm64
- linux/amd64
volumes:
- .:/repo
command: >
/bin/bash
# Additional dependencies for GUI applications
guis:
extends: overlay
image: ghcr.io/bjoernellens1/ros2_rmp/rmp:guis
build:
context: .
dockerfile: docker/Dockerfile
tags:
- ghcr.io/bjoernellens1/ros2_rmp/rmp:guis
target: guis
x-bake:
platforms:
- linux/arm64
- linux/amd64
command: >
/bin/bash
devices:
- /dev/dri:/dev/dri
# Robot State Publisher
rsp:
extends: overlay
command: >
ros2 launch cps_rmp220_support rsp.launch.py
# Controller
controller:
extends: base
command: >
ros2 run segwayrmp SmartCar --ros-args -r cmd_vel:=cmd_vel_out -p serial_full_name:=/dev/segway
devices:
      # Map the udev symlink to the same path inside the container so serial_full_name resolves.
      - /dev/segway:/dev/segway
      #- /dev/ttyUSB0:/dev/ttyUSB0
privileged: true
# teleop
teleop:
extends: base
depends_on:
- controller
command: >
ros2 launch rmp220_teleop robot_joystick.launch.py
devices:
- /dev/input/js0:/dev/input/js0
#- /dev/input/by-id/usb-Logitech_Wireless_Gamepad_F710_56679674-joystick:/dev/input/by-id/usb-Logitech_Wireless_Gamepad_F710_56679674-joystick
privileged: true
# lidar
lidar:
extends: overlay
depends_on:
- lidar_filter
command: >
ros2 launch cps_rmp220_support robot_lidar.launch.py serial_port:=/dev/rplidarA1
devices:
      - /dev/rplidarA1:/dev/rplidarA1 # udev rules are needed for these symlinks to exist:
      # SUBSYSTEM=="tty", ATTRS{serial}=="0001", SYMLINK+="segway"
      # SUBSYSTEM=="tty", ATTRS{serial}=="3453995662b3af4f81f4a69eba5f3f29", SYMLINK+="rplidarA1"
# Lidar filtering node.
lidar_filter:
extends: overlay
command: >
ros2 launch cps_rmp220_support robot_scan_filter.launch.py
  # localization via EKF node
ekf:
extends: overlay
depends_on:
- controller
- rsp
command: >
ros2 launch cps_rmp220_support robot_localization.launch.py
# mapping
mapping:
extends: overlay
depends_on:
- ekf
- rsp
- lidar
command: >
ros2 launch cps_rmp220_support robot_mapping.launch.py
# slam-toolbox-localization
localization:
extends: overlay
depends_on:
- ekf
- rsp
- lidar
command: >
ros2 launch cps_rmp220_support robot_mapping_localization.launch.py map_file_name:=/repo/maps/map.yaml
# amcl_localization
amcl:
extends: overlay
depends_on:
- ekf
- rsp
- lidar
command: >
ros2 launch cps_rmp220_support robot_amcl.launch.py map:=/repo/maps/map.yaml
# navigation
navigation:
extends: overlay
depends_on:
- controller
- teleop
- rsp
- lidar
- ekf
- oakd
command: >
ros2 launch nav2_bringup bringup_launch.py slam:=True map:=/repo/maps/map.yaml use_sim_time:=False use_composition:=True params_file:=/repo/config/nav2_params.yaml
#maps/map_openlabday.yaml
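  # Because of the depends_on chain above, the full stack can be brought up with a
  # single command (sketch): docker compose up -d navigation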
# bash
bash:
extends: overlay
command: >
/bin/bash
# rviz2
rviz2:
#extends: guis
image: ghcr.io/bjoernellens1/ros2_rmp/rmp:guis
command: >
ros2 launch cps_rmp220_support rviz.launch.py
# Needed to display graphical applications
privileged: true # really necessary?
environment:
# Allows graphical programs in the container.
- DISPLAY=${DISPLAY}
- QT_X11_NO_MITSHM=1
- NVIDIA_DRIVER_CAPABILITIES=all
- ROS_DOMAIN_ID=5
- RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
volumes:
# Allows graphical programs in the container.
- /tmp/.X11-unix:/tmp/.X11-unix:rw
- ${XAUTHORITY:-$HOME/.Xauthority}:/root/.Xauthority
# Foxglove Studio Bridge
foxglove_bridge:
extends: overlay
command: >
ros2 launch foxglove_bridge foxglove_bridge_launch.xml port:=8765
# Foxglove Studio Webserver
foxglove:
image: ghcr.io/foxglove/studio:latest
stdin_open: true
tty: true
    # Networking: joined to the caddy network so Caddy can reverse-proxy it
    # (network_mode and networks are mutually exclusive in compose).
    networks:
      - caddy_network
ports:
- 8080:8080
depends_on:
- foxglove_bridge
volumes:
- ./config/foxglove/default.json:/foxglove/default-layout.json
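    # Usage sketch: open http://<robot-ip>:8080 in a browser, then connect Studio to the
    # bridge above via a Foxglove WebSocket connection to ws://<robot-ip>:8765.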
# USB Camera Stream
cam:
extends: overlay
command: >
ros2 run ros2_cam_openCV cam_node
devices:
- /dev/video0:/dev/video0
# ROS2 Frontier exploration
explorer:
extends: overlay
depends_on:
- controller
- teleop
- rsp
- lidar
- ekf
- navigation
command: >
ros2 launch cps_rmp220_support robot_exploration.launch.py
### Images for ROS1 Interactions
# ROS1 Bridge
# ros1bridge:
# image: ghcr.io/bjoernellens1/ros2_rmp/ros1bridge
# command: >
# ros2 run ros1_bridge dynamic_bridge --bridge-all-2to1-topics
# build:
# context: .
# dockerfile: docker/Dockerfile
# tags:
# - ghcr.io/bjoernellens1/ros2_rmp/ros1bridge
# args:
# ROS_DISTRO: humble
# target: bridge
# x-bake:
# platforms:
# #- linux/arm64
# - linux/amd64
# # Networking and IPC for ROS 2
# network_mode: host
# ipc: host
# environment:
# - ROS_DOMAIN_ID=5
# - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
# - ROS_MASTER_URI=http://localhost:11311 # configured for roscore running on the robot; point this at a local ROS1 machine if needed
#ROS1 roscore
# roscore:
# command: >
# roscore
# extends: ros1bridge
# network_mode: host
# ipc: host
# environment:
# - ROS_DOMAIN_ID=5
# - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
# - ROS_MASTER_URI=http://localhost:11311 # configured for roscore running on the robot; point this at a local ROS1 machine if needed
## Configure on ROS1 Hosts
# seggy 192.168.0.100
# locally running ros-package: control1
# subscribing topic2
# publishing topic1
# robot2 192.168.x.x
# locally running ros-package: control2
# subscribing topic1
# publishing topic2
# As we need one ROS master to control the communication, we choose seggy (192.168.0.100) as master. Therefore we execute locally on robot 1:
# export ROS_MASTER_URI=http://192.168.0.100:11311 # or localhost?
# export ROS_HOSTNAME=192.168.0.100
# export ROS_IP=192.168.0.100
# roscore
# In order to connect to the ROS master, we execute locally on robot 2:
# export ROS_MASTER_URI=http://192.168.0.100:11311
# export ROS_IP=192.168.1.2
# export ROS_HOSTNAME=192.168.1.2
# ROS2 oak-d-lite camera
oakd:
extends: overlay
command: >
ros2 launch depthai_examples stereo.launch.py
#devices:
    #- /dev/oakd-lite:/dev/oakd-lite # corresponding udev rules are needed for this to work:
# SUBSYSTEM=="usb", ATTRS{idVendor}=="03e7", MODE="0666", SYMLINK+="oakd-lite"
#- /dev/:/dev/
device_cgroup_rules:
- 'c 189:* rmw'
volumes:
- /dev/bus/usb:/dev/bus/usb
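    # Character-device major 189 covers the USB bus (/dev/bus/usb); 'rmw' grants read,
    # mknod, and write, so the container keeps access when the camera re-enumerates.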
  # For testing the oak-d-lite camera; this works now thanks to the device cgroup rules.
depthai:
image: luxonis/depthai:latest
command: >
python3 /depthai/depthai_demo.py
stdin_open: true
tty: true
device_cgroup_rules:
- 'c 189:* rmw'
volumes:
- /dev/bus/usb:/dev/bus/usb
environment:
- DISPLAY=${DISPLAY}
- QT_X11_NO_MITSHM=1
- NVIDIA_DRIVER_CAPABILITIES=all
################################################################################################################################
# Core Services for Web Management #
################################################################################################################################
caddy:
image: caddy:latest
networks:
- caddy_network
extra_hosts:
- "host.docker.internal:host-gateway"
ports:
- "80:80"
- "443:443"
volumes:
- ./config/caddy/Caddyfile:/etc/caddy/Caddyfile
- ./config/caddy/content:/usr/share/caddy/
restart: always
depends_on:
- foxglove
- olivetin
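    # Caddyfile sketch (an assumption; the actual config lives in ./config/caddy/Caddyfile):
    #   :80 {
    #     handle_path /foxglove* {
    #       reverse_proxy foxglove:8080
    #     }
    #     handle_path /olivetin* {
    #       reverse_proxy olivetin:1337
    #     }
    #   }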
roscore:
image: husarion/ros1-bridge:foxy-0.9.6-20230327-stable
command: |
bash -c "source /opt/ros/noetic/setup.bash && roscore"
network_mode: host
environment:
- ROS_DOMAIN_ID=5
restart: always
ros1bridge:
image: husarion/ros1-bridge:foxy-0.9.6-20230327-stable
command: |
ros2 run ros1_bridge dynamic_bridge
network_mode: host
ipc: host
environment:
- ROS_DOMAIN_ID=5
- RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
restart: always
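    # Note: dynamic_bridge only bridges topics that have both a publisher and a subscriber
    # across the two ROS versions; add --bridge-all-topics to bridge everything.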
webui-joystick:
image: husarion/webui-ros-joystick:noetic-0.0.1-20230510-stable
network_mode: host
# networks:
# - caddy_network
# ports:
# - 8000:8000
ipc: host
environment:
- ROS_DOMAIN_ID=5
restart: always
depends_on:
- ros1bridge
command: roslaunch webui-ros-joystick webui.launch
olivetin:
container_name: olivetin
image: jamesread/olivetin
#image: ghcr.io/bjoernellens1/cps_bot_mini_ws/olivetin
build:
context: .
dockerfile: docker/Dockerfile
tags:
- ghcr.io/bjoernellens1/cps_bot_mini_ws/olivetin
target: olivetin
x-bake:
platforms:
#- linux/arm64
- linux/amd64
user: root
privileged: true
volumes:
      - ./config/olivetin/:/config/ # config.yaml and icons are located here
- ./config/olivetin/icons:/var/www/olivetin/customIcons
- .:/repo
- /var/run/docker.sock:/var/run/docker.sock
# - /var/lib/docker:/var/lib/docker
- ~/.ssh/id_rsa:/root/.ssh/id_rsa
networks:
- caddy_network
ports:
- "1337:1337"
restart: always
################################################################################################################################
# Docker related extra stuff #
################################################################################################################################
networks:
caddy_network:
driver: bridge
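
# Usage sketch: the web-management stack (Caddy, Foxglove, OliveTin) and its
# dependencies come up together with: docker compose up -d caddy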