Skip to content

Commit

Permalink
Big Bang
Browse files Browse the repository at this point in the history
Signed-off-by: Andrej Orsula <[email protected]>
  • Loading branch information
AndrejOrsula committed Oct 20, 2024
0 parents commit c8528ce
Show file tree
Hide file tree
Showing 320 changed files with 30,237 additions and 0 deletions.
57 changes: 57 additions & 0 deletions .devcontainer/devcontainer.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
{
"name": "${localWorkspaceFolderBasename}",
"build": {
"context": "${localWorkspaceFolder}",
"dockerfile": "${localWorkspaceFolder}/Dockerfile",
// Reuse layers from the published image to speed up local rebuilds
"cacheFrom": "andrejorsula/space_robotics_bench"
},
// Bind-mount the repository into the container as the workspace
"workspaceFolder": "/root/ws",
"workspaceMount": "type=bind,source=${localWorkspaceFolder},target=/root/ws",
"runArgs": [
// Network mode
"--network=host",
"--ipc=host",
// NVIDIA GPU
"--gpus=all",
// Other GPUs
"--device=/dev/dri:/dev/dri",
"--group-add=video"
],
"mounts": [
/// Common
// Time
"type=bind,source=/etc/localtime,target=/etc/localtime,readonly",
"type=bind,source=/etc/timezone,target=/etc/timezone,readonly",
// GUI (X11)
"type=bind,source=/tmp/.X11-unix,target=/tmp/.X11-unix",
// X11 authority file prepared by `initializeCommand` below
"type=bind,source=${localEnv:TMPDIR:/tmp}/xauth_docker_vsc_${localWorkspaceFolderBasename},target=${localEnv:TMPDIR:/tmp}/xauth_docker_vsc_${localWorkspaceFolderBasename}",
/// Isaac Sim
// Data
"type=bind,source=${localEnv:HOME}/.nvidia-omniverse/data/isaac-sim,target=/root/isaac-sim/kit/data",
// Cache
"type=bind,source=${localEnv:HOME}/.cache/isaac-sim,target=/root/isaac-sim/kit/cache",
"type=bind,source=${localEnv:HOME}/.cache/nvidia/GLCache,target=/root/.cache/nvidia/GLCache",
"type=bind,source=${localEnv:HOME}/.cache/ov,target=/root/.cache/ov",
"type=bind,source=${localEnv:HOME}/.nv/ComputeCache,target=/root/.nv/ComputeCache",
// Logs
"type=bind,source=${localEnv:HOME}/.nvidia-omniverse/logs,target=/root/.nvidia-omniverse/logs",
"type=bind,source=${localEnv:HOME}/.nvidia-omniverse/logs/isaac-sim,target=/root/isaac-sim/kit/logs",
/// Project
// Cache
"type=bind,source=${localEnv:HOME}/.cache/srb,target=/root/.cache/srb"
],
"containerEnv": {
// GUI (X11)
"DISPLAY": "${localEnv:DISPLAY}",
"XAUTHORITY": "${localEnv:TMPDIR:/tmp}/xauth_docker_vsc_${localWorkspaceFolderBasename}",
// NVIDIA GPU
"NVIDIA_VISIBLE_DEVICES": "all",
"NVIDIA_DRIVER_CAPABILITIES": "all"
},
// Host-side setup: create a world-readable X11 authority file whose cookies are
// rewritten to the `ffff` wildcard family, so GUI apps inside the container can
// authenticate against the host display
"initializeCommand": "XAUTH=\"${localEnv:TMPDIR:/tmp}/xauth_docker_vsc_${localWorkspaceFolderBasename}\"; touch \"${XAUTH}\"; chmod a+r \"${XAUTH}\"; XAUTH_LIST=$(xauth nlist \"${localEnv:DISPLAY}\"); if [ -n \"${XAUTH_LIST}\" ]; then echo \"${XAUTH_LIST}\" | sed -e 's/^..../ffff/' | xauth -f \"${XAUTH}\" nmerge -; fi",
"customizations": {
"vscode": {
"extensions": []
}
}
}
30 changes: 30 additions & 0 deletions .devcontainer/open.bash
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
#!/usr/bin/env bash
### Open the Dev Container in VS Code
### Usage: open.bash [WORKSPACE_FOLDER]
set -e

SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" &>/dev/null && pwd)"
REPOSITORY_DIR="$(dirname "${SCRIPT_DIR}")"

## Determine the workspace folder
if [[ -n "$1" ]]; then
    # Use the first argument as the workspace folder if provided
    WORKSPACE_FOLDER="$1"
else
    # Otherwise, try to extract the workspace folder from `./devcontainer.json`
    WORKSPACE_FOLDER="$(grep -Po '"workspaceFolder":.*?[^\\]",' "${SCRIPT_DIR}/devcontainer.json" | cut -d'"' -f4 || true)"
    if [[ -z "${WORKSPACE_FOLDER}" ]]; then
        # If `./devcontainer.json` does not contain the workspace folder, default to the root
        WORKSPACE_FOLDER="/"
    fi
fi

## Open the Dev Container in VS Code
# The `dev-container+` remote authority encodes the repository host path as lowercase hex
CODE_REMOTE_CMD=(
    code --remote
    "dev-container+$(printf "%s" "${REPOSITORY_DIR}" | xxd -p | tr -d "[:space:]")"
    "${WORKSPACE_FOLDER}"
)
echo -e "\033[1;90m${CODE_REMOTE_CMD[*]}\033[0m"
# FIX: expand the array element-wise and quoted — the previous unquoted
# `exec ${CODE_REMOTE_CMD[*]}` word-split and glob-expanded every element,
# breaking on workspace folders that contain spaces or glob characters
exec "${CODE_REMOTE_CMD[@]}"
40 changes: 40 additions & 0 deletions .docker/build.bash
Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
#!/usr/bin/env bash
### Build the Docker image
### Usage: build.bash [TAG] [BUILD_ARGS...]
set -e

SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" &>/dev/null && pwd)"
REPOSITORY_DIR="$(dirname "${SCRIPT_DIR}")"

## If the current user is not in the docker group, all docker commands will be run as root
if ! grep -qi /etc/group -e "docker.*${USER}"; then
    echo "[INFO] The current user '${USER}' is not detected in the docker group. All docker commands will be run as root."
    WITH_SUDO="sudo"
fi

## Determine the name of the image to build
# `docker info` reveals the logged-in Docker Hub user (empty if not logged in)
DOCKERHUB_USER="$(${WITH_SUDO} docker info 2>/dev/null | sed '/Username:/!d;s/.* //')"
PROJECT_NAME="$(basename "${REPOSITORY_DIR}")"
IMAGE_NAME="${DOCKERHUB_USER:+${DOCKERHUB_USER}/}${PROJECT_NAME}"

## Parse TAG and forward additional build arguments
# FIX: keep the extra build arguments as an array instead of a flattened string —
# the previous `BUILD_ARGS=${*:2}` collapsed all arguments into a single word and
# relied on an unquoted `exec` to re-split them, breaking any argument with spaces
BUILD_ARGS=()
if [ "${#}" -gt "0" ]; then
    if [[ "${1}" != "-"* ]]; then
        IMAGE_NAME+=":${1}"
        BUILD_ARGS=("${@:2}")
    else
        BUILD_ARGS=("${@:1}")
    fi
fi

## Build the image
DOCKER_BUILD_CMD=(
    # `${WITH_SUDO:+…}` contributes no word at all when WITH_SUDO is empty
    # (a quoted empty string would be exec'd as the command name)
    ${WITH_SUDO:+"${WITH_SUDO}"}
    docker build
    "${REPOSITORY_DIR}"
    --file "${REPOSITORY_DIR}/Dockerfile"
    --tag "${IMAGE_NAME}"
    "${BUILD_ARGS[@]}"
)
echo -e "\033[1;90m[TRACE] ${DOCKER_BUILD_CMD[*]}\033[0m"
exec "${DOCKER_BUILD_CMD[@]}"
31 changes: 31 additions & 0 deletions .docker/demo.bash
Original file line number Diff line number Diff line change
@@ -0,0 +1,31 @@
#!/usr/bin/env bash
### Run the demo inside a Docker container
### Usage: demo.bash [CMD...]
set -e

SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" &>/dev/null && pwd)"

## Config
# Additional volumes to mount inside the container
EXTRA_DOCKER_VOLUMES=(
    "${HOME}/Videos:/root/Videos"
)
# Additional environment variables to set inside the container
EXTRA_DOCKER_ENVIRON=(
    SRB_SKIP_EXT_MOD_UPDATE="1"
)

## Parse arguments (any arguments replace the default GUI command)
DEFAULT_CMD=(cargo run --release --package space_robotics_bench_gui)
if [ "${#}" -gt "0" ]; then
    CMD=("$@")
else
    CMD=("${DEFAULT_CMD[@]}")
fi

## Run the container
# FIX: pass each `-v`/`-e` flag and its value as separate argv words — the
# previous `"${ARR[@]/#/"-v "}"` fused flag and value into one word and relied
# on an unquoted `exec ${CMD[*]}` re-split, which broke on paths with spaces
DOCKER_RUN_CMD=("${SCRIPT_DIR}/run.bash")
for volume in "${EXTRA_DOCKER_VOLUMES[@]}"; do
    DOCKER_RUN_CMD+=(-v "${volume}")
done
for environ in "${EXTRA_DOCKER_ENVIRON[@]}"; do
    DOCKER_RUN_CMD+=(-e "${environ}")
done
DOCKER_RUN_CMD+=("${CMD[@]}")
echo -e "\033[1;90m[TRACE] ${DOCKER_RUN_CMD[*]}\033[0m"
exec "${DOCKER_RUN_CMD[@]}"
30 changes: 30 additions & 0 deletions .docker/dev.bash
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
#!/usr/bin/env bash
### Run a Docker container with additional development volumes mounted
### Usage: dev.bash [-v HOST_DIR:DOCKER_DIR:OPTIONS] [-e ENV=VALUE] [TAG] [CMD]
set -e

SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" &>/dev/null && pwd)"
REPOSITORY_DIR="$(dirname "${SCRIPT_DIR}")"
WS_DIR="$(dirname "${REPOSITORY_DIR}")"

## Config
# Development volumes to mount inside the container
DOCKER_DEV_VOLUMES=(
    "${WS_DIR}/isaaclab:/root/isaaclab:rw"
    # "${WS_DIR}/dreamerv3:/root/dreamerv3:rw"
)
# Development environment variables to set inside the container
DOCKER_DEV_ENVIRON=(
    SRB_WITH_TRACEBACK="${SRB_WITH_TRACEBACK:-true}"
)

## Run the container with development volumes
DOCKER_DEV_CMD=("${SCRIPT_DIR}/run.bash")
for volume in "${DOCKER_DEV_VOLUMES[@]}"; do
    DOCKER_DEV_CMD+=(-v "${volume}")
done
for environ in "${DOCKER_DEV_ENVIRON[@]}"; do
    DOCKER_DEV_CMD+=(-e "${environ}")
done
DOCKER_DEV_CMD+=("$@")
echo -e "\033[1;90m[TRACE] WITH_DEV_VOLUME=true ${DOCKER_DEV_CMD[*]}\033[0m"
# FIX: `WITH_DEV_VOLUME=true` must be a literal assignment prefix of the
# command — words produced by parameter expansion are never parsed as
# assignments, so the previous `exec ${DOCKER_DEV_CMD[*]}` (with the
# assignment stored as the first array element) tried to execute a command
# literally named `WITH_DEV_VOLUME=true` and failed
WITH_DEV_VOLUME=true exec "${DOCKER_DEV_CMD[@]}"
130 changes: 130 additions & 0 deletions .docker/host/install_docker.bash
Original file line number Diff line number Diff line change
@@ -0,0 +1,130 @@
#!/usr/bin/env bash
### Install Docker and NVIDIA Container Toolkit
### Usage: install_docker.bash
set -e

## Install curl if missing
# Probe the common package managers in order; if none is available, the
# download step below will fail on its own with a clear error
if ! command -v curl >/dev/null 2>&1; then
if command -v apt-get >/dev/null 2>&1; then
sudo apt-get install -y curl
elif command -v dnf >/dev/null 2>&1; then
sudo dnf install -y curl
elif command -v yum >/dev/null 2>&1; then
sudo yum install -y curl
fi
fi

## Install Docker via the convenience script
# NOTE(review): piping a remote script straight into `sh` executes unverified
# code as the current user; acceptable for a convenience installer, but worth
# pinning or checksumming for stricter environments
curl -fsSL https://get.docker.com | sh
sudo systemctl enable --now docker
## (Optional) Install support for NVIDIA if an NVIDIA GPU is detected and the installation is requested
# Return 0 only when an NVIDIA GPU is present AND its driver stack works
check_nvidia_gpu() {
    # No NVIDIA display adapter reported by the hardware lister
    lshw -C display 2>/dev/null | grep -qi "vendor.*nvidia" || return 1
    # GPU present, but nvidia-utils (nvidia-smi) is not installed
    command -v nvidia-smi >/dev/null 2>&1 || return 1
    # GPU present, but the driver cannot enumerate it
    nvidia-smi -L &>/dev/null || return 1
    # GPU present and appears to be working
    return 0
}
# Register the NVIDIA Container Toolkit apt repository: store the signing key,
# then (only if that succeeded, via `&&`) write the signed-by source list and
# refresh the package index
configure_nvidia_apt_repository() {
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey |
sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg &&
curl -sL https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list |
sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' |
sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list
sudo apt-get update
}
if check_nvidia_gpu; then
    echo -e "[INFO] NVIDIA GPU detected."
    DOCKER_VERSION="$(sudo docker version --format '{{.Server.Version}}' 2>/dev/null)"
    MIN_VERSION_FOR_TOOLKIT="19.3"
    # Docker >= 19.03 supports the NVIDIA Container Toolkit natively; older
    # Docker needs the legacy nvidia-docker2 wrapper (`sort -V | head -n1`
    # yields the smaller of the two versions)
    if [ "$(printf '%s\n' "${MIN_VERSION_FOR_TOOLKIT}" "${DOCKER_VERSION}" | sort -V | head -n1)" = "$MIN_VERSION_FOR_TOOLKIT" ]; then
        if ! command -v nvidia-container-toolkit >/dev/null 2>&1; then
            # Prompt until a valid answer is given (empty input defaults to yes)
            while true; do
                read -erp "Do you want to install NVIDIA Container Toolkit? [Y/n]: " INSTALL_NVIDIA_CONTAINER_TOOLKIT
                case "${INSTALL_NVIDIA_CONTAINER_TOOLKIT,,}" in
                    "" | y | yes)
                        INSTALL_NVIDIA_CONTAINER_TOOLKIT=true
                        break
                        ;;
                    n | no)
                        INSTALL_NVIDIA_CONTAINER_TOOLKIT=false
                        break
                        ;;
                esac
            done
            if [[ "${INSTALL_NVIDIA_CONTAINER_TOOLKIT}" = true ]]; then
                if command -v apt-get >/dev/null 2>&1; then
                    configure_nvidia_apt_repository
                    sudo apt-get install -y nvidia-container-toolkit
                elif command -v yum >/dev/null 2>&1; then
                    curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo |
                        sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo
                    sudo yum install -y nvidia-container-toolkit
                else
                    echo >&2 -e "\033[1;31m[ERROR] Supported package manager not found. Please install nvidia-container-toolkit manually.\033[0m"
                fi
                # FIX: dropped invalid `--now` — systemctl only accepts `--now`
                # with enable/disable/mask, so `restart --now` errored out
                sudo systemctl restart docker
            fi
        else
            echo -e "[INFO] NVIDIA Container Toolkit is already installed."
        fi
    else
        if ! command -v nvidia-docker >/dev/null 2>&1; then
            while true; do
                read -erp "Do you want to install NVIDIA Docker [Y/n]: " INSTALL_NVIDIA_DOCKER
                case "${INSTALL_NVIDIA_DOCKER,,}" in
                    "" | y | yes)
                        INSTALL_NVIDIA_DOCKER=true
                        break
                        ;;
                    n | no)
                        INSTALL_NVIDIA_DOCKER=false
                        break
                        ;;
                esac
            done
            if [[ "${INSTALL_NVIDIA_DOCKER}" = true ]]; then
                if command -v apt-get >/dev/null 2>&1; then
                    configure_nvidia_apt_repository
                    sudo apt-get install -y nvidia-docker2
                else
                    echo >&2 -e "\033[1;31m[ERROR] Supported package manager not found. Please install nvidia-docker2 manually.\033[0m"
                fi
                # FIX: dropped invalid `--now` (see note above)
                sudo systemctl restart docker
            fi
        else
            echo -e "[INFO] NVIDIA Docker is already installed."
        fi
    fi
fi

if [[ $(grep /etc/group -e "docker") != *"${USER}"* ]]; then
    # FIX: removed the stray bare `[ -z "${PS1}" ]` that stood here — under
    # `set -e` it silently aborted the entire script whenever PS1 happened to
    # be set/exported in the environment
    ## (Optional) Add user to docker group
    while true; do
        read -erp "Do you want to add the current user ${USER} to the docker group? [Y/n]: " ADD_USER_TO_DOCKER_GROUP
        case "${ADD_USER_TO_DOCKER_GROUP,,}" in
            "" | y | yes)
                ADD_USER_TO_DOCKER_GROUP=true
                break
                ;;
            n | no)
                ADD_USER_TO_DOCKER_GROUP=false
                break
                ;;
        esac
    done
    if [[ "${ADD_USER_TO_DOCKER_GROUP}" = true ]]; then
        sudo groupadd -f docker
        sudo usermod -aG docker "${USER}"
        # NOTE(review): `newgrp` spawns a nested shell and only affects it; the
        # script pauses until that shell exits, and other sessions still need a
        # re-login to pick up the group — confirm this is the intended UX
        newgrp docker
        echo -e "[INFO] The current user ${USER} was added to the docker group."
    fi
else
    echo -e "[INFO] The current user ${USER} is already in the docker group."
fi
51 changes: 51 additions & 0 deletions .docker/hpc/build.bash
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
#!/usr/bin/env bash
### Build a Singularity image from the Docker image
### Usage: build.bash [TAG] [BUILD_ARGS...]
set -e

SCRIPT_DIR="$(cd "$(dirname "$(readlink -f "${BASH_SOURCE[0]}")")" &>/dev/null && pwd)"
DOT_DOCKER_DIR="$(dirname "${SCRIPT_DIR}")"
REPOSITORY_DIR="$(dirname "${DOT_DOCKER_DIR}")"
IMAGES_DIR="${SCRIPT_DIR}/images"
DOCKER_BUILD_SCRIPT="${DOT_DOCKER_DIR}/build.bash"
export APPTAINER_TMPDIR="${APPTAINER_TMPDIR:-"${HOME}/.apptainer/tmp"}"

## If the current user is not in the docker group, all docker commands will be run as root
if ! grep -qi /etc/group -e "docker.*${USER}"; then
    echo "[INFO] The current user '${USER}' is not detected in the docker group. All docker commands will be run as root."
    WITH_SUDO="sudo"
fi

## Determine the name of the image to build and the output path
DOCKERHUB_USER="$(${WITH_SUDO} docker info 2>/dev/null | sed '/Username:/!d;s/.* //')"
PROJECT_NAME="$(basename "${REPOSITORY_DIR}")"
IMAGE_NAME="${DOCKERHUB_USER:+${DOCKERHUB_USER}/}${PROJECT_NAME}"
OUTPUT_PATH="${IMAGES_DIR}/${PROJECT_NAME}.sif"

## Parse TAG and forward additional build arguments
# FIX: keep extra build arguments as an array — the previous `BUILD_ARGS=${*:2}`
# flattened them into one string and forwarded them as a single quoted word,
# so multiple arguments reached the Docker build script fused together
BUILD_ARGS=()
if [ "${#}" -gt "0" ]; then
    if [[ "${1}" != "-"* ]]; then
        TAG="${1}"
        BUILD_ARGS=("${@:2}")
    else
        BUILD_ARGS=("${@:1}")
    fi
fi
TAG="${TAG:-"latest"}"
IMAGE_NAME+=":${TAG}"

## Create the temporary directory and the output directory
mkdir -p "${APPTAINER_TMPDIR}"
# FIX: ensure the images/ output directory exists before apptainer writes the .sif
mkdir -p "${IMAGES_DIR}"

## Build the Docker image
"${DOCKER_BUILD_SCRIPT}" "${TAG}" "${BUILD_ARGS[@]}"

## Convert the Docker image to a Singularity image
APPTAINER_BUILD_CMD=(
    apptainer build
    "${OUTPUT_PATH}"
    "docker-daemon:${IMAGE_NAME}"
)
echo -e "\033[1;90m[TRACE] ${APPTAINER_BUILD_CMD[*]}\033[0m"
# Quoted element-wise expansion so the output path survives spaces intact
exec "${APPTAINER_BUILD_CMD[@]}"
Loading

0 comments on commit c8528ce

Please sign in to comment.