add devcontainer config - WIP

Björn Ellensohn 2023-10-23 14:51:46 +02:00
parent fdcba432ac
commit 2900d511fd
3 changed files with 69 additions and 179 deletions

.devcontainer/devcontainer.json

@@ -0,0 +1,42 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/docker-existing-docker-compose
{
    "name": "Existing Docker Compose (Extend)",
    // Update the 'dockerComposeFile' list if you have more compose files or use different names.
    // The .devcontainer/docker-compose.yml file contains any overrides you need/want to make.
    "dockerComposeFile": [
        "../docker-compose.yaml",
        "docker-compose.yml"
    ],
    // The 'service' property is the name of the service for the container that VS Code should
    // use. Update this value and .devcontainer/docker-compose.yml to the real service name.
    "service": "my_example",
    // The optional 'workspaceFolder' property is the path VS Code should open by default when
    // connected. This is typically a file mount in .devcontainer/docker-compose.yml.
    "workspaceFolder": "/workspaces/${localWorkspaceFolderBasename}"
    // Features to add to the dev container. More info: https://containers.dev/features.
    // "features": {},
    // Use 'forwardPorts' to make a list of ports inside the container available locally.
    // "forwardPorts": [],
    // Uncomment the next line if you want to start specific services in your Docker Compose config.
    // "runServices": [],
    // Uncomment the next line if you want to keep your containers running after VS Code shuts down.
    // "shutdownAction": "none",
    // Uncomment the next line to run commands after the container is created.
    // "postCreateCommand": "cat /etc/os-release",
    // Configure tool-specific properties.
    // "customizations": {},
    // Uncomment to connect as an existing user other than the container default. More info: https://aka.ms/dev-containers-non-root.
    // "remoteUser": "ros"
}
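A quick way to sanity-check this setup is to let Docker Compose print the merged service definition that VS Code will use. A minimal sketch, assuming the repository layout implied by "dockerComposeFile" above (root docker-compose.yaml plus the override in .devcontainer/):

# Assumed paths; run from the repository root.
docker compose -f docker-compose.yaml -f .devcontainer/docker-compose.yml config my_example

If the output shows the expected image, volumes and command for the service, the dev container will be built from the same merged configuration.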

.devcontainer/docker-compose.yml

@@ -0,0 +1,26 @@
version: '3.9'
services:
  # Update this to the name of the service you want to work with in your docker-compose.yml file
  my_example:
    # Uncomment if you want to override the service's Dockerfile with one in the .devcontainer
    # folder. Note that the path of the Dockerfile and context is relative to the *primary*
    # docker-compose.yml file (the first in the devcontainer.json "dockerComposeFile"
    # array). The sample below assumes your primary file is in the root of your project.
    #
    # build:
    #   context: .
    #   dockerfile: .devcontainer/Dockerfile
    volumes:
      # Update this to wherever you want VS Code to mount the folder of your project
      - ..:/workspaces:cached
    # Uncomment the next four lines if you use a ptrace-based debugger (for example with C++, Go, or Rust).
    # cap_add:
    #   - SYS_PTRACE
    # security_opt:
    #   - seccomp:unconfined
    # Overrides the default command so things don't shut down after the process ends.
    command: /bin/sh -c "while sleep 1000; do :; done"
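The "my_example" service name here is still the template placeholder and has to match the "service" entry in devcontainer.json (hence the WIP note in the commit title). One way to exercise the configuration outside VS Code is the Dev Containers CLI; a rough sketch, assuming @devcontainers/cli is installed and the placeholder has been replaced with a real service:

# Assumes the Dev Containers CLI (npm package @devcontainers/cli) is available.
devcontainer up --workspace-folder .          # start the compose services referenced by devcontainer.json
devcontainer exec --workspace-folder . bash   # open a shell inside the running dev container

Inside VS Code itself, the equivalent is the "Dev Containers: Reopen in Container" command.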

docker-compose.yaml

@@ -27,182 +27,4 @@ services: # in docker compose, services are definitions of your docker containers
      # Allows graphical programs in the container.
      - /tmp/.X11-unix:/tmp/.X11-unix:rw
      - ${XAUTHORITY:-$HOME/.Xauthority}:/root/.Xauthority
    network_mode: host # host networking makes life easier
  # The rest of this file is an example of how I used Docker, Docker Compose and buildx bake to fully automate image building for the Segway RMP-lite 220 robot platform.
  # Base image containing dependencies, for example the robot controller.
  base:
    image: ghcr.io/bjoernellens1/ros2_docker_template/ros2:base # This tells Docker where to look for the image remotely, so you don't need to build the image on your PC. You might replace this with your own container registry.
    build:
      context: .
      dockerfile: docker/Dockerfile # the Dockerfile specifies the steps taken in the build process.
      tags:
        - ghcr.io/bjoernellens1/ros2_docker_template/ros2:base # this will be the output image tag. You might replace this with your own container registry.
      args: # specify build arguments; you may use them to alter the build process.
        ROS_DISTRO: humble # For instance, you could change to foxy here.
      target: base # here you specify the build target in the Dockerfile.
      x-bake:
        platforms:
          # - linux/arm64 # uncomment if an arm64 platform is needed (like a Jetson Nano or Raspberry Pi)
          - linux/amd64
    # Enable an interactive shell.
    stdin_open: true
    tty: true
    # Networking and IPC for ROS 2 --> keep these two entries if working with ROS 2, otherwise you might just delete them.
    network_mode: host # host networking makes life easier
    ipc: host # --> faster communication between containers
    # Needed to display graphical applications
    # privileged: true # Should only be set to true during the development phase. This parameter is not necessarily needed, as it is very much possible to set the right access permissions first and expose only the devices the container really needs.
    environment:
      # Allows graphical programs in the container.
      - DISPLAY=${DISPLAY}
      - QT_X11_NO_MITSHM=1
      - NVIDIA_DRIVER_CAPABILITIES=all # For NVIDIA hardware acceleration (CUDA, ...). This may additionally need some extra tools installed (NVIDIA Container Toolkit?).
    volumes:
      # Allows graphical programs in the container.
      - /tmp/.X11-unix:/tmp/.X11-unix:rw
      - ${XAUTHORITY:-$HOME/.Xauthority}:/root/.Xauthority
    devices: # all devices you may need inside the Docker container (for instance I needed a joystick here)
      - /dev/input/js0:/dev/input/js0 # mapping in Docker is always host_path:container_path. Also, correct permissions must be set when the container is not run in privileged mode.
  # Overlay image containing the extended project packages. You may want to separate your basic setup from your project-specific setup, for instance to improve portability. In my case I separated the essential ROS 2 packages from the extended use-case packages.
  overlay:
    extends: base # this means you take the base image as reference and run your service on top of that. Since this service extends base, all environment variables and mount points defined before remain intact; all new definitions can be seen as "overrides".
    image: ghcr.io/bjoernellens1/ros2_docker_template/ros2:overlay # you might replace this with your own container registry
    build:
      context: .
      dockerfile: docker/Dockerfile
      tags:
        - ghcr.io/bjoernellens1/ros2_rmp/rmp:overlay
      target: overlay # which section in the Dockerfile should be built.
      x-bake:
        platforms:
          # - linux/arm64 # uncomment if an arm64 platform is needed (like a Jetson Nano or Raspberry Pi)
          - linux/amd64
    volumes:
      - .:/repo # mounting this folder into the container under /repo (that means files in this repository are directly accessible inside the container --> an easy way to save changes made to files).
    command: >
      /bin/bash
  # Additional dependencies for GUI applications: for my configuration, this last (and largest) image houses the additional GUI tools like rviz2, gazebo, etc. It comes last because it extends the previous images. It won't be needed when running on the robot; it is more suited for visualization, analysis or control on a separate PC.
  guis:
    extends: overlay
    image: ghcr.io/bjoernellens1/ros2_docker_template/ros2:guis
    build:
      context: .
      dockerfile: docker/Dockerfile
      tags:
        - ghcr.io/bjoernellens1/ros2_docker_template/ros2:guis
      target: guis
      x-bake:
        platforms:
          # - linux/arm64 # uncomment if an arm64 platform is needed (like a Jetson Nano or Raspberry Pi)
          - linux/amd64
    # entrypoint: /bin/bash
    command: >
      /bin/bash
  # The following service definitions do not include build instructions, as they do not require additional content in the images. That means: if you already have the image locally, everything is fine. If you don't have the image locally, docker compose will download it from the image registry for you. If docker compose does not have access to the image registry, it will build the corresponding image for you (so better make sure you have access).
  # You can see these service definitions as an easy way to spin up separate containers for launching separate tasks. I use this to control the current operational mode of the robot (mapping, or navigation and localization). In the end it's just spinning up launch files in separate containers.
  # Note that service definitions will always inherit the preferences from their "parent" service. However, you can override them.

  # Robot State Publisher
  rsp:
    extends: overlay
    command: >
      ros2 launch cps_rmp220_support rsp.launch.py
    environment:
      - ROS_DOMAIN_ID=5

  # Controller
  controller:
    extends: base
    command: >
      ros2 run segwayrmp SmartCar --ros-args -r cmd_vel:=cmd_vel_out -p serial_full_name:=/dev/ttyUSB0
    devices:
      - /dev/ttyUSB0:/dev/ttyUSB0
    environment:
      - ROS_DOMAIN_ID=5
    restart: unless-stopped

  # teleop
  teleop:
    extends: base
    command: >
      ros2 launch rmp220_teleop robot_joystick.launch.py
    devices:
      - /dev/input/js0:/dev/input/js0
    environment:
      - ROS_DOMAIN_ID=5

  # lidar
  lidar:
    extends: overlay
    command: >
      ros2 launch cps_rmp220_support robot_lidar.launch.py serial_port:=/dev/ttyUSB0
    environment:
      - ROS_DOMAIN_ID=5
    devices:
      - /dev/ttyUSB1:/dev/ttyUSB1
      - /dev/ttyUSB0:/dev/ttyUSB0

  # localization by EKF node
  ekf:
    extends: overlay
    command: >
      ros2 launch cps_rmp220_support robot_localization.launch.py
    environment:
      - ROS_DOMAIN_ID=5

  # mapping
  mapping:
    extends: overlay
    command: >
      ros2 launch cps_rmp220_support robot_mapping.launch.py
    environment:
      - ROS_DOMAIN_ID=5

  # slam_localization
  localization:
    extends: overlay
    command: >
      ros2 launch cps_rmp220_support robot_mapping_localization.launch.py map_file_name:=/repo/maps/map.yaml
    environment:
      - ROS_DOMAIN_ID=5

  # amcl_localization
  amcl:
    extends: overlay
    command: >
      ros2 launch cps_rmp220_support robot_amcl.launch.py map:=/repo/maps/map.yaml
    environment:
      - ROS_DOMAIN_ID=5

  # navigation
  navigation:
    extends: overlay
    command: >
      ros2 launch cps_rmp220_support robot_navigation.launch.py
      map_subscribe_transient_local:=true
    environment:
      - ROS_DOMAIN_ID=5

  # bash
  bash:
    extends: overlay
    command: >
      /bin/bash
    environment:
      - ROS_DOMAIN_ID=5

  # rviz2
  rviz2:
    extends: guis
    command: >
      ros2 launch cps_rmp220_support rviz.launch.py
    environment:
      - ROS_DOMAIN_ID=5
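For context, the removed example above was meant to be driven with plain Docker tooling; a hedged sketch of that workflow, assuming this file is the root docker-compose.yaml and the ghcr.io registry is reachable:

# Assumed invocation from the repository root; image and service names as defined above.
docker buildx bake -f docker-compose.yaml base overlay guis    # build the image hierarchy (honours the x-bake platform settings)
docker compose up -d controller teleop lidar                   # start selected task services on the robot
docker compose run --rm bash                                   # interactive shell using the 'bash' service (overlay image)

Because each task service only overrides command, devices and environment, the robot runs a handful of small containers that all share the same base/overlay/guis images.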