version: "3.9"  # NOTE: the top-level `version` key is obsolete in Compose v2 and ignored; kept for backward compatibility.

# First network configuration. Change this to the network interface you want to
# use primarily. (Better configuration for multiple network interfaces needed.)
networks:
#  rmp:
#    driver: macvlan
#    driver_opts:
#      parent: eno1  # robot network interface
#    ipam:
#      config:
#        - subnet: 192.168.0.0/24
#          gateway: 192.168.0.1
#          ip_range: 192.168.0.200/25
#          aux_addresses:
#            net-address: 192.168.0.100  # excludes this address from being assigned
  # New bridge network for ROS. NOTE(review): no service below is attached to
  # it — everything runs with network_mode: host — so this is currently unused.
  rmp:
    driver: bridge
#    ipam:
#      config:
#        - subnet:

# This mountpoint is added to all services: ./customize/entrypoint.sh:/entrypoint.sh
# Attention: child services inherit settings from their parents (via `extends`).
# If you set network_mode: host in the base service, all child services will
# also use host networking. This is not always what you want, so be careful.
services:
  # Base image containing dependencies.
  base:
    image: ghcr.io/bjoernellens1/ros2_rmp/rmp:base
    build:
      context: .
      dockerfile: docker/Dockerfile
      tags:
        - ghcr.io/bjoernellens1/ros2_rmp/rmp:base
      args:
        ROS_DISTRO: humble
      target: base
      x-bake:
        platforms:
          - linux/arm64
          - linux/amd64
    # Interactive shell
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2
    network_mode: host
    ipc: host
    # Needed to display graphical applications
    environment:
      # Allows graphical programs in the container.
      - DISPLAY=${DISPLAY}
      - QT_X11_NO_MITSHM=1
      - NVIDIA_DRIVER_CAPABILITIES=all
      # Set correct ROS 2 parameters: domain id and RMW implementation.
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
      - CYCLONEDDS_URI=file:///cyclonedds.xml
    volumes:
      # Allows graphical programs in the container.
      - /tmp/.X11-unix:/tmp/.X11-unix:rw
      - ${XAUTHORITY:-$HOME/.Xauthority}:/root/.Xauthority
      - ./customize/entrypoint.sh:/entrypoint.sh
      - ./customize/cyclonedds_zerotier.xml:/cyclonedds.xml
#    networks:  # Not using bridging anymore; instead try using all services on
#               # macvlan? Should work for now. Bridging definitely does not
#               # work because multicast is not supported there.
#      rmp:
#        #ipv4_address: 192.168.0.101  # No need to set IPs manually; they are
#                                      # assigned by docker-compose and would be
#                                      # inherited by all child services.

  # Overlay image containing the project specific source code.
  overlay:
    extends: base
    image: ghcr.io/bjoernellens1/ros2_rmp/rmp:overlay
    build:
      context: .
      dockerfile: docker/Dockerfile
      tags:
        - ghcr.io/bjoernellens1/ros2_rmp/rmp:overlay
      target: overlay
      x-bake:
        platforms:
          - linux/arm64
          - linux/amd64
    volumes:
      - .:/repo
    command: >
      /bin/bash

  # Additional dependencies for GUI applications.
  guis:
    extends: overlay
    image: ghcr.io/bjoernellens1/ros2_rmp/rmp:guis
    build:
      context: .
      dockerfile: docker/Dockerfile
      tags:
        - ghcr.io/bjoernellens1/ros2_rmp/rmp:guis
      target: guis
      x-bake:
        platforms:
          - linux/arm64
          - linux/amd64
    command: >
      /bin/bash

  # Robot State Publisher
  rsp:
    extends: overlay
    command: >
      ros2 launch cps_rmp220_support rsp.launch.py

  # Controller
  controller:
    extends: base
    command: >
      ros2 run segwayrmp SmartCar --ros-args -r cmd_vel:=cmd_vel_out -p serial_full_name:=/dev/segway
    devices:
      - /dev/segway:/dev/segway
      - /dev/ttyUSB0:/dev/ttyUSB0

  # Teleop (joystick)
  teleop:
    extends: base
    depends_on:
      - controller
    command: >
      ros2 launch rmp220_teleop robot_joystick.launch.py
    devices:
      - /dev/input/js0:/dev/input/js0

  # Lidar
  lidar:
    extends: overlay
    depends_on:
      - lidar_filter
    command: >
      ros2 launch cps_rmp220_support robot_lidar.launch.py serial_port:=/dev/rplidarA1
    devices:
      - /dev/rplidarA1:/dev/rplidarA1
    # udev rules needed for this to work:
    # SUBSYSTEM=="tty", ATTRS{serial}=="0001", SYMLINK+="segway"
    # SUBSYSTEM=="tty", ATTRS{serial}=="3453995662b3af4f81f4a69eba5f3f29", SYMLINK+="rplidarA1"

  # Lidar filtering node.
  lidar_filter:
    extends: overlay
    command: >
      ros2 launch cps_rmp220_support robot_scan_filter.launch.py

  # Localization by EKF node.
  ekf:
    extends: overlay
    depends_on:
      - controller
      - rsp
    command: >
      ros2 launch cps_rmp220_support robot_localization.launch.py

  # Mapping
  mapping:
    extends: overlay
    depends_on:
      - ekf
      - rsp
      - lidar
    command: >
      ros2 launch cps_rmp220_support robot_mapping.launch.py

  # slam-toolbox localization
  localization:
    extends: overlay
    depends_on:
      - ekf
      - rsp
      - lidar
    command: >
      ros2 launch cps_rmp220_support robot_mapping_localization.launch.py map_file_name:=/repo/maps/map.yaml

  # AMCL localization
  amcl:
    extends: overlay
    depends_on:
      - ekf
      - rsp
      - lidar
    command: >
      ros2 launch cps_rmp220_support robot_amcl.launch.py map:=/repo/maps/map.yaml

  # Navigation (Nav2)
  navigation:
    extends: overlay
    depends_on:
      - controller
      - teleop
      - rsp
      - lidar
      - ekf
      - oakd
    command: >
      ros2 launch nav2_bringup bringup_launch.py slam:=False map:=/repo/maps/map_openlabday.yaml use_sim_time:=False use_composition:=True params_file:=/overlay_ws/src/cps_rmp220_support/config/nav2_params.yaml

  # Interactive bash shell in the overlay image.
  bash:
    extends: overlay
    command: >
      /bin/bash

  # rviz2
  rviz2:
    #extends: guis
    image: ghcr.io/bjoernellens1/ros2_rmp/rmp:guis
    command: >
      ros2 launch cps_rmp220_support rviz.launch.py
    # Needed to display graphical applications.
    privileged: true  # TODO(review): really necessary?
    environment:
      # Allows graphical programs in the container.
      - DISPLAY=${DISPLAY}
      - QT_X11_NO_MITSHM=1
      - NVIDIA_DRIVER_CAPABILITIES=all
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
    volumes:
      # Allows graphical programs in the container.
      - /tmp/.X11-unix:/tmp/.X11-unix:rw
      - ${XAUTHORITY:-$HOME/.Xauthority}:/root/.Xauthority

  # Foxglove Studio Bridge
  foxglove_bridge:
    extends: overlay
    command: >
      ros2 launch foxglove_bridge foxglove_bridge_launch.xml port:=8765

  # Foxglove Studio Webserver
  foxglove_webserver:
    image: ghcr.io/foxglove/studio:latest
    stdin_open: true
    tty: true
    # Networking
    network_mode: bridge
    ports:
      - "8080:8080"  # quoted: port mappings should always be strings in YAML
    depends_on:
      - foxglove_bridge
    volumes:
      - ./foxglove/default.json:/foxglove/default-layout.json

  # USB Camera Stream
  cam:
    extends: overlay
    command: >
      ros2 run ros2_cam_openCV cam_node
    devices:
      - /dev/video0:/dev/video0

  # ROS 2 frontier exploration
  explorer:
    extends: overlay
    depends_on:
      - controller
      - teleop
      - rsp
      - lidar
      - ekf
      - navigation
    command: >
      ros2 launch cps_rmp220_support robot_exploration.launch.py

  ### Images for ROS 1 interactions.

  # ROS 1 bridge
  ros1bridge:
    image: ghcr.io/bjoernellens1/ros2_rmp/ros1bridge
    command: >
      ros2 run ros1_bridge dynamic_bridge --bridge-all-2to1-topics
    build:
      context: .
      dockerfile: docker/Dockerfile
      tags:
        - ghcr.io/bjoernellens1/ros2_rmp/ros1bridge
      args:
        ROS_DISTRO: humble
      target: bridge
      x-bake:
        platforms:
          #- linux/arm64
          - linux/amd64
    # Networking and IPC for ROS 2
    network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
      # Configured to run roscore on the robot; change to a local ROS 1 machine here if needed.
      - ROS_MASTER_URI=http://localhost:11311

  # ROS 1 roscore
  roscore:
    command: >
      roscore
    extends: ros1bridge
    network_mode: host
    ipc: host
    environment:
      - ROS_DOMAIN_ID=5
      - RMW_IMPLEMENTATION=rmw_cyclonedds_cpp
      # Configured to run roscore on the robot; change to a local ROS 1 machine here if needed.
      - ROS_MASTER_URI=http://localhost:11311

  # ROS 2 OAK-D-Lite camera
  oakd:
    extends: overlay
    command: >
      ros2 launch depthai_examples stereo.launch.py
    #devices:
      #- /dev/oakd-lite:/dev/oakd-lite
      # Needs corresponding udev rules to work:
      # SUBSYSTEM=="usb", ATTRS{idVendor}=="03e7", MODE="0666", SYMLINK+="oakd-lite"
      #- /dev/:/dev/
    device_cgroup_rules:
      - 'c 189:* rmw'
    volumes:
      - /dev/bus/usb:/dev/bus/usb

  # For testing the OAK-D-Lite camera -> works now with cgroup rules.
  depthai:
    image: luxonis/depthai:latest
    command: >
      python3 /depthai/depthai_demo.py
    stdin_open: true
    tty: true
    device_cgroup_rules:
      - 'c 189:* rmw'
    volumes:
      - /dev/bus/usb:/dev/bus/usb
    environment:
      - DISPLAY=${DISPLAY}
      - QT_X11_NO_MITSHM=1
      - NVIDIA_DRIVER_CAPABILITIES=all

  # ZeroTier router for remote access between robot and workstation.
  zerotier:
    image: "zyclonite/zerotier:router"
    container_name: zerotier-one
    devices:
      - /dev/net/tun
    network_mode: host
    volumes:
      - '/var/lib/zerotier-one:/var/lib/zerotier-one'
    cap_add:
      - NET_ADMIN
      - SYS_ADMIN
      - NET_RAW
    restart: unless-stopped
    environment:
      - TZ=Europe/Amsterdam
      - PUID=1000
      - PGID=1000
      - ZEROTIER_ONE_LOCAL_PHYS=enp5s0 wlx00e04c5513fc  # change for actual interfaces
      - ZEROTIER_ONE_USE_IPTABLES_NFT=false
      - ZEROTIER_ONE_GATEWAY_MODE=inbound  # might change to both ways
      #- ZEROTIER_ONE_NETWORK_IDS=  # does not really do much?