version: "3.9" # First network configuration. Change this to the network interface you want to use primarily. (Better configuration for multiple Network interfaces needed). networks: # rmp: # driver: macvlan # driver_opts: # parent: eno1 # robot network interface # ipam: # config: # - subnet: 192.168.0.0/24 # gateway: 192.168.0.1 # ip_range: 192.168.0.200/25 # aux_addresses: # net-address: 192.168.0.100 #? what is this for --> to exclude addresses from buing used. # make new bridge network for ros rmp: driver: bridge # ipam: # config: # - subnet: services: # Base image containing dependencies. base: image: ghcr.io/bjoernellens1/ros2_rmp/rmp:base build: context: . dockerfile: docker/Dockerfile tags: - ghcr.io/bjoernellens1/ros2_rmp/rmp:base args: ROS_DISTRO: humble target: base x-bake: platforms: - linux/arm64 - linux/amd64 # Interactive shell stdin_open: true tty: true # Networking and IPC for ROS 2 network_mode: host ipc: host # Needed to display graphical applications #privileged: true environment: # Allows graphical programs in the container. - DISPLAY=${DISPLAY} - QT_X11_NO_MITSHM=1 - NVIDIA_DRIVER_CAPABILITIES=all volumes: # Allows graphical programs in the container. - /tmp/.X11-unix:/tmp/.X11-unix:rw - ${XAUTHORITY:-$HOME/.Xauthority}:/root/.Xauthority # networks: # not using bridging anymore, instead try using all services on macvlan? let's see... shoudl word for now. Bridging does definitely not work because multicast is not supported here. # rmp: # #ipv4_address: 192.168.0.101 #actually don't need to set ips. they are set automatically by docker-compose. SHould be inherited by all child services. # Overlay image containing the project specific source code. overlay: extends: base image: ghcr.io/bjoernellens1/ros2_rmp/rmp:overlay build: context: . dockerfile: docker/Dockerfile tags: - ghcr.io/bjoernellens1/ros2_rmp/rmp:overlay target: overlay x-bake: platforms: - linux/arm64 - linux/amd64 volumes: - .:/repo command: > /bin/bash # Additional dependencies for GUI applications guis: extends: overlay image: ghcr.io/bjoernellens1/ros2_rmp/rmp:guis build: context: . 
  # Additional dependencies for GUI applications
  guis:
    extends: overlay
    image: ghcr.io/bjoernellens1/ros2_rmp/rmp:guis
    build:
      context: .
      dockerfile: docker/Dockerfile
      tags:
        - ghcr.io/bjoernellens1/ros2_rmp/rmp:guis
      target: guis
      x-bake:
        platforms:
          - linux/arm64
          - linux/amd64
    #entrypoint: /bin/bash
    command: >
      /bin/bash

  # Robot State Publisher
  rsp:
    extends: overlay
    command: >
      ros2 launch cps_rmp220_support rsp.launch.py
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2
    #network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp

  # Controller
  controller:
    extends: base
    command: >
      ros2 run segwayrmp SmartCar --ros-args
      -r cmd_vel:=cmd_vel_out
      -p serial_full_name:=/dev/segway
    devices:
      - /dev/segway:/dev/segway
      - /dev/ttyUSB0:/dev/ttyUSB0
    # Interactive shell
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2
    #network_mode: host
    ipc: host
    # Needed to display graphical applications
    privileged: false
    # depends_on:
    #   - robot_state_publisher
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp

  # Teleop
  teleop:
    extends: base
    depends_on:
      - controller
    command: >
      ros2 launch rmp220_teleop robot_joystick.launch.py
    devices:
      - /dev/input/js0:/dev/input/js0
    # Interactive shell
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2
    #network_mode: host
    ipc: host
    # Needed to display graphical applications
    privileged: true
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp

  # Lidar
  lidar:
    extends: overlay
    command: >
      ros2 launch cps_rmp220_support robot_lidar.launch.py
      serial_port:=/dev/rplidarA1
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2
    #network_mode: host
    ipc: host
    # depends_on:
    #   - robot_state_publisher
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
    devices:
      - /dev/rplidarA1:/dev/rplidarA1
    # udev rules needed for this to work:
    # SUBSYSTEM=="tty", ATTRS{serial}=="0001", SYMLINK+="segway"
    # SUBSYSTEM=="tty", ATTRS{serial}=="3453995662b3af4f81f4a69eba5f3f29", SYMLINK+="rplidarA1"

  # Localization by EKF node
  ekf:
    extends: overlay
    depends_on:
      - controller
      - rsp
    command: >
      ros2 launch cps_rmp220_support robot_localization.launch.py
    # Interactive shell
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2
    #network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp

  # Mapping
  mapping:
    extends: overlay
    depends_on:
      - ekf
      - rsp
      - lidar
    command: >
      ros2 launch cps_rmp220_support robot_mapping.launch.py
    # Interactive shell
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2
    #network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp

  # slam_toolbox localization
  localization:
    extends: overlay
    depends_on:
      - ekf
      - rsp
      - lidar
    command: >
      ros2 launch cps_rmp220_support robot_mapping_localization.launch.py
      map_file_name:=/repo/maps/map.yaml
    # Interactive shell
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2
    #network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp

  # AMCL localization
  amcl:
    extends: overlay
    depends_on:
      - ekf
      - rsp
      - lidar
    command: >
      ros2 launch cps_rmp220_support robot_amcl.launch.py
      map:=/repo/maps/map.yaml
    # Interactive shell
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2
    #network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
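  # Note (assumption): the map files referenced above and below (/repo/maps/*.yaml)
  # must be created beforehand, e.g. by running the mapping service and then
  # saving the result from inside the overlay container:
  #
  #   ros2 run nav2_map_server map_saver_cli -f /repo/maps/map
  #
  # which writes map.yaml plus map.pgm into the mounted repo.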
  # Navigation
  navigation:
    extends: overlay
    depends_on:
      - controller
      - teleop
      - rsp
      - lidar
      - ekf
      - oakd
    # command: >
    #   ros2 launch cps_rmp220_support robot_navigation.launch.py
    #   map_subscribe_transient_local:=true
    command: >
      ros2 launch nav2_bringup bringup_launch.py
      slam:=True
      map:=/repo/maps/map_current.sav.yaml
      use_sim_time:=False
      use_composition:=True
      params_file:=/overlay_ws/src/cps_rmp220_support/config/nav2_params.yaml
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2
    #network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp

  # Bash
  bash:
    extends: overlay
    command: >
      /bin/bash
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2
    #network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp

  # RViz2
  rviz2:
    #extends: guis
    image: ghcr.io/bjoernellens1/ros2_rmp/rmp:guis
    # command: >
    #   ros2 launch cps_rmp220_support robot_rviz2.launch.py
    command: >
      ros2 launch cps_rmp220_support rviz.launch.py
    # Interactive shell
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2
    network_mode: host
    ipc: host
    # Needed to display graphical applications
    privileged: true
    environment:
      # Allows graphical programs in the container.
      - DISPLAY=${DISPLAY}
      - QT_X11_NO_MITSHM=1
      - NVIDIA_DRIVER_CAPABILITIES=all
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
    volumes:
      # Allows graphical programs in the container.
      - /tmp/.X11-unix:/tmp/.X11-unix:rw
      - ${XAUTHORITY:-$HOME/.Xauthority}:/root/.Xauthority

  # Foxglove Studio Bridge
  foxglove_bridge:
    extends: overlay
    command: >
      ros2 launch foxglove_bridge foxglove_bridge_launch.xml
      port:=8765
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2
    #network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
    # Not bridging anymore; instead try running all services on macvlan. Should
    # work for now. Bridging definitely does not work here because multicast is
    # not supported.
    networks:
      rmp:
        ipv4_address: 192.168.0.201 # Setting IPs manually is not actually needed; docker-compose assigns them automatically. Should be inherited by all child services.

  # Foxglove Studio Webserver
  foxglove_webserver:
    image: ghcr.io/foxglove/studio:latest
    stdin_open: true
    tty: true
    # Networking
    network_mode: bridge
    ports:
      - 8080:8080
    depends_on:
      - foxglove_bridge
    volumes:
      - ./foxglove/default.json:/foxglove/default-layout.json

  # USB Camera Stream
  cam:
    extends: overlay
    command: >
      ros2 run ros2_cam_openCV cam_node
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2
    #network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
    devices:
      - /dev/video0:/dev/video0

  # ROS2 frontier exploration
  explorer:
    extends: overlay
    depends_on:
      - controller
      - teleop
      - rsp
      - lidar
      - ekf
      - navigation
    command: >
      ros2 launch cps_rmp220_support robot_exploration.launch.py
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2
    #network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
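  # Usage sketch (assumption): thanks to the depends_on chains above, starting a
  # high-level service also starts its prerequisites, e.g.:
  #
  #   docker compose up -d navigation          # also starts controller, teleop, rsp, lidar, ekf, oakd
  #   docker compose up -d foxglove_webserver  # also starts foxglove_bridge; UI on http://<host>:8080, bridge on ws://<robot-ip>:8765
  #
  # Note that depends_on only orders container startup; it does not wait for the
  # ROS nodes inside to be ready.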
  ### Images for ROS1 Interactions

  # ROS1 Bridge
  ros1bridge:
    image: ghcr.io/bjoernellens1/ros2_rmp/ros1bridge
    command: >
      ros2 run ros1_bridge dynamic_bridge --bridge-all-2to1-topics
    build:
      context: .
      dockerfile: docker/Dockerfile
      tags:
        - ghcr.io/bjoernellens1/ros2_rmp/ros1bridge
      args:
        ROS_DISTRO: humble
      target: bridge
      x-bake:
        platforms:
          #- linux/arm64
          - linux/amd64
    # Networking and IPC for ROS 2
    #network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
      - ROS_MASTER_URI=http://localhost:11311 # configured to run roscore on the robot, but could be changed to a local ROS1 machine here

  # ROS1 roscore
  roscore:
    command: >
      roscore
    extends: ros1bridge
    #network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
      - ROS_MASTER_URI=http://localhost:11311 # configured to run roscore on the robot, but could be changed to a local ROS1 machine here

  # ROS2 OAK-D-Lite camera
  oakd:
    extends: overlay
    command: >
      ros2 launch depthai_examples stereo.launch.py
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2
    #network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
    #privileged: true
    #devices:
    #  - /dev/oakd-lite:/dev/oakd-lite
    #    # needs a corresponding udev rule to work:
    #    # SUBSYSTEM=="usb", ATTRS{idVendor}=="03e7", MODE="0666", SYMLINK+="oakd-lite"
    #  - /dev/:/dev/
    device_cgroup_rules:
      - 'c 189:* rmw'
    volumes:
      - /dev/bus/usb:/dev/bus/usb

  # For testing the OAK-D-Lite camera; works now with the cgroup rules above.
  depthai:
    image: luxonis/depthai:latest
    command: >
      python3 /depthai/depthai_demo.py
    stdin_open: true
    tty: true
    device_cgroup_rules:
      - 'c 189:* rmw'
    volumes:
      - /dev/bus/usb:/dev/bus/usb
    environment:
      - DISPLAY=${DISPLAY}
      - QT_X11_NO_MITSHM=1
      - NVIDIA_DRIVER_CAPABILITIES=all
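# Setup sketch (assumption): the udev rules quoted in the lidar and oakd
# services above must be installed on the host, e.g. in a rules file such as
# /etc/udev/rules.d/99-rmp.rules (the filename is hypothetical):
#
#   SUBSYSTEM=="tty", ATTRS{serial}=="0001", SYMLINK+="segway"
#   SUBSYSTEM=="tty", ATTRS{serial}=="3453995662b3af4f81f4a69eba5f3f29", SYMLINK+="rplidarA1"
#   SUBSYSTEM=="usb", ATTRS{idVendor}=="03e7", MODE="0666", SYMLINK+="oakd-lite"
#
# then reloaded with:
#
#   sudo udevadm control --reload-rules && sudo udevadm trigger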