Files
dotfiles/profiles/desktop/.local/bin/distrobox-create
2026-02-13 04:20:30 +01:00

1094 lines
38 KiB
Bash
Executable File

#!/bin/sh
# SPDX-License-Identifier: GPL-3.0-only
#
# This file is part of the distrobox project:
# https://github.com/89luca89/distrobox
#
# Copyright (C) 2021 distrobox contributors
#
# distrobox is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3
# as published by the Free Software Foundation.
#
# distrobox is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with distrobox; if not, see <http://www.gnu.org/licenses/>.
# POSIX
# Expected env variables:
# HOME
# USER
# Optional env variables:
# DBX_CONTAINER_ALWAYS_PULL
# DBX_CONTAINER_CUSTOM_HOME
# DBX_CONTAINER_HOME_PREFIX
# DBX_CONTAINER_IMAGE
# DBX_CONTAINER_MANAGER
# DBX_CONTAINER_NAME
# DBX_CONTAINER_HOSTNAME
# DBX_CONTAINER_GENERATE_ENTRY
# DBX_NON_INTERACTIVE
# DBX_VERBOSE
# DBX_SUDO_PROGRAM
# DBX_USERNS_NOLIMIT
# Although running this script via SUDO/DOAS is not supported (the script
# itself will call the appropriate tool when necessary), we still want to
# allow people to run it as root, logged in to a shell, and create rootful
# containers.
#
# SUDO_USER is a variable set by SUDO and can be used to check whether the script was called by it. Same thing for DOAS_USER, set by DOAS.
# Refuse to run when wrapped in sudo/doas while effectively root: rootful
# containers must be requested through the --root flag instead.
# SUDO_USER / DOAS_USER are only set when sudo/doas spawned this process.
if [ "$(id -ru)" -eq 0 ] && { [ -n "${SUDO_USER}" ] || [ -n "${DOAS_USER}" ]; }; then
	printf >&2 "Running %s via SUDO/DOAS is not supported. Instead, please try running:\n" "$(basename "${0}")"
	printf >&2 "  %s --root %s\n" "$(basename "${0}")" "$*"
	exit 1
fi
# Ensure we have our env variables correctly set; fall back to the passwd
# database when the login shell did not export them.
[ -z "${USER}" ] && USER="$(id -run)"
[ -z "${HOME}" ] && HOME="$(getent passwd "${USER}" | cut -d':' -f6)"
[ -z "${SHELL}" ] && SHELL="$(getent passwd "${USER}" | cut -d':' -f7)"
# Defaults: every value below can be overridden by the config files sourced
# further down, by DBX_* environment variables, or by command-line flags.
container_additional_packages=""
container_additional_volumes=""
container_always_pull=0
container_clone=""
container_generate_entry=1
container_home_prefix=""
container_image=""
container_image_default="registry.fedoraproject.org/fedora-toolbox:latest"
container_init_hook=""
container_manager="autodetect"
container_manager_additional_flags=""
container_platform=""
container_name=""
container_name_default="my-distrobox"
container_hostname=""
container_pre_init_hook=""
container_user_custom_home=""
# Real (not effective) IDs and identity of the invoking user; these are
# forwarded to distrobox-init so the container user mirrors the host user.
container_user_gid="$(id -rg)"
container_user_home="${HOME:-"/"}"
container_user_name="${USER}"
container_user_uid="$(id -ru)"
# Boolean feature toggles (0 = off, 1 = on), flipped by flags below.
dryrun=0
init=0
non_interactive=0
nvidia=0
nopasswd=0
unshare_ipc=0
unshare_groups=0
unshare_netns=0
unshare_process=0
unshare_devsys=0
# Use cd + dirname + pwd so that we do not have relative paths in mount points
# We're not using "realpath" here so that symlinks are not resolved this way
# "realpath" would break situations like Nix or similar symlink based package
# management.
distrobox_entrypoint_path="$(cd "$(dirname "${0}")" && pwd)/distrobox-init"
distrobox_export_path="$(cd "$(dirname "${0}")" && pwd)/distrobox-export"
distrobox_genentry_path="$(cd "$(dirname "${0}")" && pwd)/distrobox-generate-entry"
distrobox_hostexec_path="$(cd "$(dirname "${0}")" && pwd)/distrobox-host-exec"
# In case some of the scripts are not in the same path as create, let's search
# in PATH for them.
[ ! -e "${distrobox_entrypoint_path}" ] && distrobox_entrypoint_path="$(command -v distrobox-init)"
[ ! -e "${distrobox_export_path}" ] && distrobox_export_path="$(command -v distrobox-export)"
[ ! -e "${distrobox_genentry_path}" ] && distrobox_genentry_path="$(command -v distrobox-generate-entry)"
[ ! -e "${distrobox_hostexec_path}" ] && distrobox_hostexec_path="$(command -v distrobox-host-exec)"
# If the user runs this script as root in a login shell, set rootful=1.
# There's no need for them to pass the --root flag option in such cases.
[ "${container_user_uid}" -eq 0 ] && rootful=1 || rootful=0
userns_nolimit=0
verbose=0
version="1.8.2.2"
# Cache directory used e.g. by show_compatibility for the image list.
app_cache_dir=${XDG_CACHE_HOME:-"${HOME}/.cache"}/distrobox
# Source configuration files, this is done in a hierarchy so local files have
# priority over system defaults;
# leave priority to environment variables.
#
# On NixOS, for the distrobox derivation to pick up a static config file shipped
# by the package maintainer the path must be relative to the script itself.
self_dir="$(dirname "$(realpath "$0")")"
nix_config_file="${self_dir}/../share/distrobox/distrobox.conf"
config_files="
	${nix_config_file}
	/usr/share/distrobox/distrobox.conf
	/usr/share/defaults/distrobox/distrobox.conf
	/usr/etc/distrobox/distrobox.conf
	/usr/local/share/distrobox/distrobox.conf
	/etc/distrobox/distrobox.conf
	${XDG_CONFIG_HOME:-"${HOME}/.config"}/distrobox/distrobox.conf
	${HOME}/.distroboxrc
"
for config_file in ${config_files}; do
	# Shellcheck will give error for sourcing a variable file as it cannot follow
	# it. We don't care so let's disable this linting for now.
	# shellcheck disable=SC1090
	[ -e "${config_file}" ] && . "$(realpath "${config_file}")"
done
# If we're running this script as root -- as in logged in in the shell as root
# user, and not via SUDO/DOAS --, we don't need to set distrobox_sudo_program
# as it's meaningless for this use case.
if [ "${container_user_uid}" -ne 0 ]; then
	# If the DBX_SUDO_PROGRAM/distrobox_sudo_program variable was set by the
	# user, use its value instead of "sudo". But only if not running the script
	# as root (UID 0).
	distrobox_sudo_program=${DBX_SUDO_PROGRAM:-${distrobox_sudo_program:-"sudo"}}
fi
# Environment variables take precedence over the config files sourced above.
[ -n "${DBX_CONTAINER_ALWAYS_PULL}" ] && container_always_pull="${DBX_CONTAINER_ALWAYS_PULL}"
[ -n "${DBX_CONTAINER_CUSTOM_HOME}" ] && container_user_custom_home="${DBX_CONTAINER_CUSTOM_HOME}"
[ -n "${DBX_CONTAINER_HOME_PREFIX}" ] && container_home_prefix="${DBX_CONTAINER_HOME_PREFIX}"
[ -n "${DBX_CONTAINER_IMAGE}" ] && container_image="${DBX_CONTAINER_IMAGE}"
[ -n "${DBX_CONTAINER_MANAGER}" ] && container_manager="${DBX_CONTAINER_MANAGER}"
[ -n "${DBX_CONTAINER_NAME}" ] && container_name="${DBX_CONTAINER_NAME}"
[ -n "${DBX_CONTAINER_HOSTNAME}" ] && container_hostname="${DBX_CONTAINER_HOSTNAME}"
[ -n "${DBX_CONTAINER_GENERATE_ENTRY}" ] && container_generate_entry="${DBX_CONTAINER_GENERATE_ENTRY}"
[ -n "${DBX_NON_INTERACTIVE}" ] && non_interactive="${DBX_NON_INTERACTIVE}"
[ -n "${DBX_VERBOSE}" ] && verbose="${DBX_VERBOSE}"
[ -n "${DBX_USERNS_NOLIMIT}" ] && userns_nolimit="${DBX_USERNS_NOLIMIT}"
# Fixup variable=[true|false], in case we find it in the config file(s):
# normalize booleans to 1/0 so the numeric tests used later keep working.
[ "${non_interactive}" = "true" ] && non_interactive=1
[ "${non_interactive}" = "false" ] && non_interactive=0
[ "${verbose}" = "true" ] && verbose=1
[ "${verbose}" = "false" ] && verbose=0
[ "${userns_nolimit}" = "true" ] && userns_nolimit=1
[ "${userns_nolimit}" = "false" ] && userns_nolimit=0
# show_help will print usage to stdout.
# Arguments:
# None
# Expected global variables:
# version: string distrobox version
# container_image_default: string default container image to use
# container_name_default: string default container name to use
# Expected env variables:
# None
# Outputs:
# print usage with examples.
# show_help prints usage and examples to stdout.
# Reads globals: version, container_image_default, container_name_default.
# Fixes vs previous revision: the --additional-flags example now shows a
# valid env assignment (MY_VAR=value, not MY_VAR-value), and --unshare-all
# correctly points at the unshare flags listed ABOVE it.
show_help()
{
	cat << EOF
distrobox version: ${version}
Usage:
	distrobox create --image alpine:latest --name test --init-hooks "touch /var/tmp/test1 && touch /var/tmp/test2"
	distrobox create --image fedora:39 --name test --additional-flags "--env MY_VAR=value"
	distrobox create --image fedora:39 --name test --volume /opt/my-dir:/usr/local/my-dir:rw --additional-flags "--pids-limit 100"
	distrobox create -i docker.io/almalinux/8-init --init --name test --pre-init-hooks "dnf config-manager --enable powertools && dnf -y install epel-release"
	distrobox create --clone fedora-39 --name fedora-39-copy
	distrobox create --image alpine my-alpine-container
	distrobox create --image registry.fedoraproject.org/fedora-toolbox:latest --name fedora-toolbox-latest
	distrobox create --pull --image centos:stream9 --home ~/distrobox/centos9
	distrobox create --image alpine:latest --name test2 --additional-packages "git tmux vim"
	distrobox create --image ubuntu:22.04 --name ubuntu-nvidia --nvidia
	DBX_NON_INTERACTIVE=1 DBX_CONTAINER_NAME=test-alpine DBX_CONTAINER_IMAGE=alpine distrobox-create
Options:
	--image/-i:		image to use for the container	default: ${container_image_default}
	--name/-n:		name for the distrobox		default: ${container_name_default}
	--hostname:		hostname for the distrobox	default: $(uname -n)
	--pull/-p:		pull the image even if it exists locally (implies --yes)
	--yes/-Y:		non-interactive, pull images without asking
	--root/-r:		launch podman/docker/lilipod with root privileges. This is the only supported way to run with root
				privileges. Do not use "sudo distrobox". If you need to specify a different program (e.g. 'doas') for root privileges,
				use the DBX_SUDO_PROGRAM environment variable or the 'distrobox_sudo_program' config variable.
	--clone/-c:		name of the distrobox container to use as base for a new container
				this will be useful to either rename an existing distrobox or have multiple copies
				of the same environment.
	--home/-H:		select a custom HOME directory for the container. Useful to avoid host's home littering with temp files.
	--volume:		additional volumes to add to the container
	--additional-flags/-a:	additional flags to pass to the container manager command
	--additional-packages/-ap:	additional packages to install during initial container setup
	--init-hooks:		additional commands to execute at the end of container initialization
	--pre-init-hooks:	additional commands to execute at the start of container initialization
	--init/-I:		use init system (like systemd) inside the container.
				this will make host's processes not visible from within the container. (assumes --unshare-process)
				may require additional packages depending on the container image: https://github.com/89luca89/distrobox/blob/main/docs/useful_tips.md#using-init-system-inside-a-distrobox
	--nvidia:		try to integrate host's nVidia drivers in the guest
	--platform:		specify which platform to use, eg: linux/arm64
	--unshare-devsys:	do not share host devices and sysfs dirs from host
	--unshare-groups:	do not forward user's additional groups into the container
	--unshare-ipc:		do not share ipc namespace with host
	--unshare-netns:	do not share the net namespace with host
	--unshare-process:	do not share process namespace with host
	--unshare-all:		activate all the unshare flags above
	--compatibility/-C:	show list of compatible images
	--help/-h:		show this message
	--no-entry:		do not generate a container entry in the application list
	--dry-run/-d:		only print the container manager command generated
	--verbose/-v:		show more verbosity
	--version/-V:		show version
	--absolutely-disable-root-password-i-am-really-positively-sure: ⚠️ ⚠️  when setting up a rootful distrobox, this will skip user password setup, leaving it blank. ⚠️ ⚠️
Compatibility:
	for a list of compatible images and container managers, please consult the man page:
		man distrobox-compatibility
	or run
		distrobox create --compatibility
	or consult the documentation page on: https://github.com/89luca89/distrobox/blob/main/docs/compatibility.md
EOF
}
# show_compatibility will print the list of compatible images to stdout, caching locally in a file.
# Arguments:
# None
# Expected global variables:
# app_cache_dir: cache dir to write to
# version: distrobox version
# Expected env variables:
# None
# Outputs:
# print usage with examples.
# show_compatibility prints the list of distrobox-compatible images, serving
# from a per-version cache file and rebuilding it from the online docs when
# the cache is missing or empty.
# Reads globals: app_cache_dir, version. Exits 1 when the cache is absent and
# there is no network connectivity.
show_compatibility()
{
	# Warm cache: print it and stop. "test -s" implies the file exists.
	if [ -s "${app_cache_dir}/distrobox-compatibility-${version}" ]; then
		cat "${app_cache_dir}/distrobox-compatibility-${version}"
		return 0
	fi
	mkdir -p "${app_cache_dir}"
	# If we don't have a cache file, we need connectivity. Ensure we have
	# one and return error if not.
	if ! curl -s "https://github.com" > /dev/null; then
		printf >&2 "ERROR: no cache file and no connectivity found, cannot retrieve compatibility list.\n"
		exit 1
	fi
	# Download this release's compatibility table (the docs are the source of
	# truth) and distill it into a sorted, de-duplicated list of image names.
	curl -s \
		"https://raw.githubusercontent.com/89luca89/distrobox/${version}/docs/compatibility.md" |
		sed -n -e '/| Alma/,/| Void/ p' |
		cut -d '|' -f 4 |
		sed 's|<br>|\n|g' |
		tr -d ' ' |
		sort -u > "${app_cache_dir}/distrobox-compatibility-${version}"
	cat "${app_cache_dir}/distrobox-compatibility-${version}"
}
# Parse arguments
# Walk the positional parameters until exhausted.
# NOTE: every flag that takes a value now fails loudly when the value is
# missing or empty. Previously the "if [ -n "$2" ]" guard simply skipped the
# shift, leaving "$1" unchanged and spinning this loop forever on input such
# as "distrobox create --image".
while :; do
	case $1 in
		-h | --help)
			# Call a "show_help" function to display a synopsis, then exit.
			show_help
			exit 0
			;;
		-v | --verbose)
			verbose=1
			shift
			;;
		-V | --version)
			printf "distrobox: %s\n" "${version}"
			exit 0
			;;
		--no-entry)
			shift
			container_generate_entry=0
			;;
		-d | --dry-run)
			shift
			dryrun=1
			;;
		-r | --root)
			shift
			rootful=1
			;;
		--absolutely-disable-root-password-i-am-really-positively-sure)
			shift
			nopasswd=1
			;;
		-I | --init)
			# Initful containers need their own process tree and groups.
			shift
			init=1
			unshare_groups=1
			unshare_process=1
			;;
		--unshare-ipc)
			shift
			unshare_ipc=1
			;;
		--unshare-groups)
			shift
			unshare_groups=1
			;;
		--unshare-netns)
			shift
			unshare_netns=1
			;;
		--unshare-process)
			shift
			unshare_process=1
			;;
		--unshare-devsys)
			shift
			unshare_devsys=1
			;;
		--unshare-all)
			shift
			unshare_devsys=1
			unshare_groups=1
			unshare_ipc=1
			unshare_netns=1
			unshare_process=1
			;;
		-C | --compatibility)
			show_compatibility
			exit 0
			;;
		-i | --image)
			if [ -n "$2" ]; then
				container_image="$2"
				shift
				shift
			else
				printf >&2 "ERROR: Flag '%s' requires an argument\n" "$1"
				exit 1
			fi
			;;
		-n | --name)
			if [ -n "$2" ]; then
				container_name="$2"
				shift
				shift
			else
				printf >&2 "ERROR: Flag '%s' requires an argument\n" "$1"
				exit 1
			fi
			;;
		--hostname)
			if [ -n "$2" ]; then
				container_hostname="$2"
				shift
				shift
			else
				printf >&2 "ERROR: Flag '%s' requires an argument\n" "$1"
				exit 1
			fi
			;;
		-c | --clone)
			if [ -n "$2" ]; then
				container_clone="$2"
				shift
				shift
			else
				printf >&2 "ERROR: Flag '%s' requires an argument\n" "$1"
				exit 1
			fi
			;;
		-H | --home)
			if [ -n "$2" ]; then
				# Remove trailing slashes
				container_user_custom_home="$(echo "$2" | sed 's:/*$::')"
				shift
				shift
			else
				printf >&2 "ERROR: Flag '%s' requires an argument\n" "$1"
				exit 1
			fi
			;;
		-p | --pull)
			container_always_pull=1
			shift
			;;
		--nvidia)
			shift
			nvidia=1
			;;
		-Y | --yes)
			non_interactive=1
			shift
			;;
		--volume)
			if [ -n "$2" ]; then
				container_additional_volumes="${container_additional_volumes} ${2}"
				shift
				shift
			else
				printf >&2 "ERROR: Flag '%s' requires an argument\n" "$1"
				exit 1
			fi
			;;
		--platform)
			if [ -n "$2" ]; then
				container_platform="--platform=${2}"
				shift
				shift
			else
				printf >&2 "ERROR: Flag '%s' requires an argument\n" "$1"
				exit 1
			fi
			;;
		-a | --additional-flags)
			if [ -n "$2" ]; then
				container_manager_additional_flags="${container_manager_additional_flags} ${2}"
				shift
				shift
			else
				printf >&2 "ERROR: Flag '%s' requires an argument\n" "$1"
				exit 1
			fi
			;;
		-ap | --additional-packages)
			if [ -n "$2" ]; then
				container_additional_packages="${container_additional_packages} ${2}"
				shift
				shift
			else
				printf >&2 "ERROR: Flag '%s' requires an argument\n" "$1"
				exit 1
			fi
			;;
		--init-hooks)
			if [ -n "$2" ]; then
				container_init_hook="$2"
				shift
				shift
			else
				printf >&2 "ERROR: Flag '%s' requires an argument\n" "$1"
				exit 1
			fi
			;;
		--pre-init-hooks)
			if [ -n "$2" ]; then
				container_pre_init_hook="${2}"
				shift
				shift
			else
				printf >&2 "ERROR: Flag '%s' requires an argument\n" "$1"
				exit 1
			fi
			;;
		--) # End of all options.
			shift
			break
			;;
		-*) # Invalid options.
			printf >&2 "ERROR: Invalid flag '%s'\n\n" "$1"
			show_help
			exit 1
			;;
		*) # Default case: If no more options then break out of the loop.
			# If we have a flagless option and container_name is not specified
			# then let's accept argument as container_name
			if [ -n "$1" ]; then
				container_name="$1"
				shift
			else
				break
			fi
			;;
	esac
done
# From here on, unset variables and unchecked command failures are fatal.
set -o errexit
set -o nounset
# set verbosity
if [ "${verbose}" -ne 0 ]; then
	set -o xtrace
fi
# If no clone option and no container image, let's choose a default image to use.
# Fedora toolbox is a sensible default
if [ -z "${container_clone}" ] && [ -z "${container_image}" ]; then
	container_image="${container_image_default}"
fi
# If no name is specified and we're using the default container_image, then let's
# set a default name for the container, that is distinguishable from the default
# toolbx one. This will avoid problems when using both toolbx and distrobox on
# the same system.
if [ -z "${container_name}" ] && [ "${container_image}" = "${container_image_default}" ]; then
	container_name="${container_name_default}"
fi
# If no container_name is declared, we build our container name starting from the
# container image specified.
#
# Examples:
#	alpine -> alpine
#	ubuntu:20.04 -> ubuntu-20.04
#	registry.fedoraproject.org/fedora-toolbox:39 -> fedora-toolbox-39
#	ghcr.io/void-linux/void-linux:latest-full-x86_64 -> void-linux-latest-full-x86_64
if [ -z "${container_name}" ]; then
	container_name="$(basename "${container_image}" | sed -E 's/[:.]/-/g')"
fi
# set the container hostname to default value
if [ -z "${container_hostname}" ]; then
	container_hostname="$(uname -n)"
	if [ "${unshare_netns}" -eq 1 ]; then
		container_hostname="${container_name}.${container_hostname}"
	fi
fi
# check if container hostname is less than 64 chars to prevent issues
if [ "$(printf "%s" "${container_hostname}" | wc -m)" -gt 64 ]; then
	printf >&2 "ERROR: Invalid hostname '%s', longer than 64 characters\n" "${container_hostname}"
	printf >&2 "ERROR: Use the --hostname argument to set it manually\n"
	exit 1
fi
# We depend on a container manager let's be sure we have it
# First we use podman, else docker, else lilipod
case "${container_manager}" in
	autodetect)
		if command -v podman > /dev/null; then
			container_manager="podman"
		elif command -v podman-launcher > /dev/null; then
			container_manager="podman-launcher"
		elif command -v docker > /dev/null; then
			container_manager="docker"
		elif command -v lilipod > /dev/null; then
			container_manager="lilipod"
		fi
		;;
	podman)
		container_manager="podman"
		;;
	podman-launcher)
		container_manager="podman-launcher"
		;;
	lilipod)
		container_manager="lilipod"
		;;
	docker)
		container_manager="docker"
		;;
	*)
		printf >&2 "Invalid input %s.\n" "${container_manager}"
		printf >&2 "The available choices are: 'autodetect', 'podman', 'docker', 'lilipod'\n"
		# An unknown manager can never work: bail out now instead of falling
		# through (which previously went unnoticed in --dry-run mode).
		exit 1
		;;
esac
# Be sure we have a container manager to work with.
if ! command -v "${container_manager}" > /dev/null && [ "${dryrun}" -eq 0 ]; then
	# Error: we need at least one between docker, podman or lilipod.
	printf >&2 "Missing dependency: we need a container manager.\n"
	printf >&2 "Please install one of podman, docker or lilipod.\n"
	printf >&2 "You can follow the documentation on:\n"
	printf >&2 "\tman distrobox-compatibility\n"
	printf >&2 "or:\n"
	printf >&2 "\thttps://github.com/89luca89/distrobox/blob/main/docs/compatibility.md\n"
	exit 127
fi
# add verbose if -v is specified
if [ "${verbose}" -ne 0 ]; then
	container_manager="${container_manager} --log-level debug"
fi
# prepend sudo (or the specified sudo program) if we want our container manager to be rootful
if [ "${rootful}" -ne 0 ]; then
	container_manager="${distrobox_sudo_program-} ${container_manager}"
fi
# if nopasswd, then let the init know via a mountpoint
if [ "${nopasswd}" -ne 0 ]; then
	container_manager_additional_flags="${container_manager_additional_flags}
		--volume /dev/null:/run/.nopasswd:ro"
fi
# inject additional volumes if specified
if [ -n "${container_additional_volumes}" ]; then
	for volume in ${container_additional_volumes}; do
		container_manager_additional_flags="${container_manager_additional_flags}
			--volume ${volume}"
	done
fi
# Check that we have a complete distrobox installation or
# entrypoint and export will not work.
if [ -z "${distrobox_entrypoint_path}" ] || [ -z "${distrobox_export_path}" ]; then
	printf >&2 "Error: no distrobox-init found in %s\n" "${PATH}"
	exit 127
fi
# get_clone_image will return the image name of a cloned existing container taken
# as input.
# Arguments:
# None
# Expected global variables:
# container_manager: string container manager to use
# container_clone: string container name to clone
# Expected env variables:
# None
# Outputs:
# prints the image name of the newly cloned container
get_clone_image()
{
	# We need to clone a container.
	# To do this we will commit the container and create a new tag. Then use it
	# as image for the new container.
	#
	# To perform this we first ensure the source container exists and that the
	# source container is stopped, else the clone will not work.
	container_source_status="$(${container_manager} inspect --type container \
		--format '{{.State.Status}}' "${container_clone}")"
	# Refuse to clone a running container: committing it would capture an
	# inconsistent state, so ask the user to stop it first.
	if [ "${container_source_status}" = "running" ]; then
		printf >&2 "Container %s is running.\nPlease stop it first.\n" "${container_clone}"
		printf >&2 "Cannot clone a running container.\n"
		return 1
	fi
	# Now we can extract the container ID and commit it to use as source image
	# for the new container.
	container_source_id="$(${container_manager} inspect --type container \
		--format '{{.ID}}' "${container_clone}")"
	# Tag is "<clone-name>:<YYYY-MM-DD>"; image tags must be lowercase.
	container_commit_tag="$(echo "${container_clone}:$(date +%F)" | tr '[:upper:]' '[:lower:]')"
	# Commit current container state to a new image tag
	printf >&2 "Duplicating %s...\n" "${container_clone}"
	if ! ${container_manager} container commit \
		"${container_source_id}" "${container_commit_tag}" > /dev/null; then
		printf >&2 "Cannot clone container: %s\n" "${container_clone}"
		return 1
	fi
	# Return the image tag to use for the new container creation.
	printf "%s" "${container_commit_tag}"
	return 0
}
# generate_create_command will produce a Podman or Docker command to execute.
# Arguments:
# None
# Expected global variables:
# container_manager: string container manager to use
# container_name: string container name
# container_image: string container image
# container_manager_additional_flags: string container manager additional flags to use
# container_hostname: string container hostname
# container_additional_packages: string additional packages
# container_pre_init_hook: string pre init hooks
# container_init_hook: string init hooks
# container_user_home: string user's home path
# container_user_name: string user's username
# container_user_uid: string user's UID
# container_user_gid: string user's GID
# container_home_prefix: string container's custom home prefix
# container_user_custom_home: string container's custom home path
# init: bool initful
# nvidia: bool nvidia integration
# rootful: bool rootful
# unshare_devsys: bool unshare devsys
# unshare_groups: bool unshare groups
# unshare_ipc: bool unshare ipc
# unshare_netns: bool unshare netns
# unshare_process: bool unshare proc
# Expected env variables:
# None
# Outputs:
# prints the podman, docker or lilipod command to create the distrobox container
generate_create_command()
{
	# Build the full "<manager> create ..." command line, accumulating flags
	# into result_command and printing the finished command on stdout.
	result_command="${container_manager} create"
	result_command="${result_command} ${container_platform}"
	# use the host's namespace for ipc, network, pid, ulimit
	result_command="${result_command}
		--hostname \"${container_hostname}\"
		--name \"${container_name}\"
		--privileged
		--security-opt label=disable
		--security-opt apparmor=unconfined
		--pids-limit=-1
		--user root:root"
	# Each unshare_* toggle, when 0 (default), shares the corresponding host
	# namespace with the container.
	if [ "${unshare_ipc}" -eq 0 ]; then
		result_command="${result_command}
			--ipc host"
	fi
	if [ "${unshare_netns}" -eq 0 ]; then
		result_command="${result_command}
			--network host"
	fi
	if [ "${unshare_process}" -eq 0 ]; then
		result_command="${result_command}
			--pid host"
	fi
	# Mount useful stuff inside the container.
	# We also mount host's root filesystem to /run/host, to be able to syphon
	# dynamic configurations from the host.
	#
	# Mount user home, dev and host's root inside container.
	# This grants access to external devices like usb webcams, disks and so on.
	#
	# Mount also the distrobox-init utility as the container entrypoint.
	# Also mount in the container the distrobox-export and distrobox-host-exec
	# utilities.
	result_command="${result_command}
		--label \"manager=distrobox\"
		--label \"distrobox.unshare_groups=${unshare_groups}\"
		--env \"SHELL=$(basename "${SHELL:-"/bin/bash"}")\"
		--env \"HOME=${container_user_home}\"
		--env \"container=${container_manager}\"
		--env \"TERMINFO_DIRS=/usr/share/terminfo:/run/host/usr/share/terminfo\"
		--env \"CONTAINER_ID=${container_name}\"
		--volume /tmp:/tmp:rslave
		--volume \"${distrobox_entrypoint_path}\":/usr/bin/entrypoint:ro
		--volume \"${distrobox_export_path}\":/usr/bin/distrobox-export:ro
		--volume \"${distrobox_hostexec_path}\":/usr/bin/distrobox-host-exec:ro
		--volume \"${container_user_home}\":\"${container_user_home}\":rslave"
	# Due to breaking change in https://github.com/opencontainers/runc/commit/d4b670fca6d0ac606777376440ffe49686ce15f4
	# now we cannot mount /:/run/host as before, as it will try to mount RO partitions as RW thus breaking things.
	# This will ensure we will mount directories one-by-one thus avoiding this problem.
	#
	# This happens ONLY with podman+runc, docker and lilipod are unaffected, so let's do this only if we have podman AND runc.
	if echo "${container_manager}" | grep -q "podman" && ${container_manager} info 2> /dev/null | grep -q runc > /dev/null 2>&1; then
		for rootdir in /*; do
			# Skip symlinks
			if [ -L "${rootdir}" ]; then
				continue
			fi
			# Find if the directory belongs to a RO mount; if it does, mount it as RO+rslave
			if findmnt --notruncate --noheadings --list --output OPTIONS --target "${rootdir}" |
				tr ',' '\n' | grep -q "^ro$"; then
				result_command="${result_command}
					--volume ${rootdir}:/run/host${rootdir}:ro,rslave"
				continue
			fi
			# Else we mount it RW+rslave
			result_command="${result_command}
				--volume ${rootdir}:/run/host${rootdir}:rslave"
		done
	else
		# We're either on podman+crun, docker or lilipod, let's keep old behaviour
		result_command="${result_command}
			--volume /:/run/host/:rslave"
	fi
	if [ "${unshare_devsys}" -eq 0 ]; then
		result_command="${result_command}
			--volume /dev:/dev:rslave
			--volume /sys:/sys:rslave"
	fi
	# In case of initful containers, we implement a series of mountpoints in order
	# for systemd to work properly inside a container.
	# The following are a flag-based implementation of what podman's --systemd flag
	# does under the hood, as explained in their docs here:
	# https://docs.podman.io/en/latest/markdown/options/systemd.html
	#
	# set the default stop signal to SIGRTMIN+3.
	# mount tmpfs file systems on the following directories
	#	/run
	#	/run/lock
	#	/tmp
	#	/var/lib/journal
	#	/sys/fs/cgroup/systemd <- this one is done by cgroupns=host
	if [ "${init}" -eq 1 ] && echo "${container_manager}" | grep -q "docker"; then
		# In case of docker we're actually rootful, so we need to use host's cgroups
		result_command="${result_command}
			--cgroupns host"
	fi
	if [ "${init}" -eq 1 ] && echo "${container_manager}" | grep -vq "podman"; then
		# In case of all other non-podman container managers, we can do this
		result_command="${result_command}
			--stop-signal SIGRTMIN+3
			--mount type=tmpfs,destination=/run
			--mount type=tmpfs,destination=/run/lock
			--mount type=tmpfs,destination=/var/lib/journal"
	fi
	# This fix is needed so that the container can have a separate devpts instance
	# inside.
	# This will mount an empty /dev/pts, and the init will take care of mounting
	# a new devpts with the proper flags set.
	# Mounting an empty volume there is needed in order to ensure that no package
	# manager tries to fiddle with /dev/pts/X that would not be writable by them.
	#
	# This implementation is done this way in order to be compatible with both
	# docker and podman.
	if [ "${unshare_devsys}" -eq 0 ]; then
		result_command="${result_command}
			--volume /dev/pts
			--volume /dev/null:/dev/ptmx"
	fi
	# This fix is needed as on Selinux systems, the host's selinux sysfs directory
	# will be mounted inside the rootless container.
	#
	# This works around this and allows the rootless container to work when selinux
	# policies are installed inside it.
	#
	# Ref. Podman issue 4452:
	# https://github.com/containers/podman/issues/4452
	if [ -e "/sys/fs/selinux" ]; then
		result_command="${result_command}
			--volume /sys/fs/selinux"
	fi
	# This fix is needed as systemd (or journald) will try to set ACLs on this
	# path. For now overlayfs and fuse.overlayfs are not compatible with ACLs.
	#
	# This works around this using an unnamed volume so that this path will be
	# mounted with a normal non-overlay FS, allowing ACLs and preventing errors.
	#
	# This work around works in conjunction with distrobox-init's package manager
	# setups.
	# So that we can use pre/post hooks for package managers to present to the
	# systemd install script a blank path to work with, and mount the host's
	# journal path afterwards.
	result_command="${result_command}
		--volume /var/log/journal"
	# In some systems, for example using sysvinit, /dev/shm is a symlink
	# to /run/shm, instead of the other way around.
	# Resolve this detecting if /dev/shm is a symlink and mount original
	# source also in the container.
	if [ -L "/dev/shm" ] && [ "${unshare_ipc}" -eq 0 ]; then
		result_command="${result_command}
			--volume $(realpath /dev/shm):$(realpath /dev/shm)"
	fi
	# Ensure that both all container managers (not only podman) support forwarding of RedHat subscription-manager.
	# This is needed in order to have a working subscription forwarded into the container,
	# this will ensure that rhel-9-for-x86_64-appstream-rpms and rhel-9-for-x86_64-baseos-rpms repos
	# will be available in the container, so that distrobox-init will be able to
	# install properly all the dependencies like mesa drivers.
	#
	# /run/secrets is a standard location for RHEL containers, that is being pointed by
	# /etc/rhsm-host by default.
	RHEL_SUBSCRIPTION_FILES="
		/etc/pki/entitlement/:/run/secrets/etc-pki-entitlement:ro
		/etc/rhsm/:/run/secrets/rhsm:ro
		/etc/yum.repos.d/redhat.repo:/run/secrets/redhat.repo:ro
	"
	# Each entry is already in host:container:options form; only mount the
	# ones whose host path (first ':'-separated field) exists.
	for rhel_file in ${RHEL_SUBSCRIPTION_FILES}; do
		if [ -e "$(echo "${rhel_file}" | cut -d':' -f1)" ]; then
			result_command="${result_command}
				--volume ${rhel_file}"
		fi
	done
	# If we have a home prefix to use, and no custom home set, then we set
	# the custom home to be PREFIX/CONTAINER_NAME
	if [ -n "${container_home_prefix}" ] && [ -z "${container_user_custom_home}" ]; then
		container_user_custom_home="${container_home_prefix}/${container_name}"
	fi
	# If we have a custom home to use,
	#	1- override the HOME env variable
	#	2- export the DISTROBOX_HOST_HOME env variable pointing to original HOME
	#	3- mount the custom home inside the container.
	if [ -n "${container_user_custom_home}" ]; then
		if [ ! -d "${container_user_custom_home}" ]; then
			if ! mkdir -p "${container_user_custom_home}"; then
				printf >&2 "Do you have permission to write to %s?\n" "${container_user_custom_home}"
				exit 1
			fi
		fi
		result_command="${result_command}
			--env \"HOME=${container_user_custom_home}\"
			--env \"DISTROBOX_HOST_HOME=${container_user_home}\"
			--volume \"${container_user_custom_home}:${container_user_custom_home}:rslave\""
	fi
	# Mount also the /var/home dir on ostree based systems
	# do this only if $HOME was not already set to /var/home/username
	if [ "${container_user_home}" != "/var/home/${container_user_name}" ] &&
		[ -d "/var/home/${container_user_name}" ]; then
		result_command="${result_command}
			--volume \"/var/home/${container_user_name}\":\"/var/home/${container_user_name}\":rslave"
	fi
	# Mount also the XDG_RUNTIME_DIR to ensure functionality of the apps.
	# This is skipped in case of initful containers, so that a dedicated
	# systemd user session can be used.
	if [ -d "/run/user/${container_user_uid}" ] && [ "${init}" -eq 0 ]; then
		result_command="${result_command}
			--volume /run/user/${container_user_uid}:/run/user/${container_user_uid}:rslave"
	fi
	# These are dynamic configs needed by the container to function properly
	# and integrate with the host
	#
	# We're doing this now instead of inside the init because some distros will
	# have symlinks places for these files that use absolute paths instead of
	# relative paths.
	# This is the bare minimum to ensure connectivity inside the container.
	# These files, will then be kept updated by the main loop every 15 seconds.
	if [ "${unshare_netns}" -eq 0 ]; then
		NET_FILES="
			/etc/hosts
			/etc/resolv.conf
		"
		# If container_hostname is custom, we skip mounting /etc/hostname, else
		# we want to keep it in sync
		if [ "${container_hostname}" = "$(uname -n)" ]; then
			NET_FILES="${NET_FILES} /etc/hostname"
		fi
		for net_file in ${NET_FILES}; do
			if [ -e "${net_file}" ]; then
				result_command="${result_command}
					--volume ${net_file}:${net_file}:ro"
			fi
		done
	fi
	# These flags are not supported by docker, so we use them only if our
	# container manager is podman.
	if echo "${container_manager}" | grep -q "podman"; then
		# If possible, always prefer crun, as it allows keeping original groups.
		# useful for rootless containers.
		if command -v crun > /dev/null 2>&1; then
			result_command="${result_command}
				--runtime=crun"
		fi
		result_command="${result_command}
			--annotation run.oci.keep_original_groups=1
			--ulimit host"
		if [ "${init}" -eq 1 ]; then
			result_command="${result_command}
				--systemd=always"
		fi
		# Use keep-id only if going rootless.
		if [ "${rootful}" -eq 0 ]; then
			result_command="${result_command}
				--userns keep-id"
			# Test if podman supports keep-id:size=
			# NOTE(review): "|| [ "$?" -eq 127 ]" also counts exit 127 as
			# supported — presumably because 127 means /bin/true is missing
			# in the image rather than podman rejecting the flag; confirm.
			if podman run --rm --userns=keep-id:size=65536 "${container_image}" /bin/true > /dev/null 2>&1 || [ "$?" -eq 127 ]; then
				has_keepid_size=1
			else
				has_keepid_size=0
			fi
			# Add :size=65536 if wanted; appended directly to the trailing
			# "--userns keep-id" added just above.
			if [ "${has_keepid_size}" -eq 1 ] && [ "${userns_nolimit}" -eq 0 ]; then
				result_command="${result_command}:size=65536"
			fi
		fi
	fi
	# Add additional flags
	result_command="${result_command}
		${container_manager_additional_flags}"
	# Now execute the entrypoint, refer to `distrobox-init -h` for instructions
	#
	# Be aware that entrypoint corresponds to distrobox-init, the copying of it
	# inside the container is moved to distrobox-enter, in the start phase.
	# This is done to make init, export and host-exec location independent from
	# the host, and easier to upgrade.
	#
	# We set the entrypoint _before_ running the container image so that
	# we can override any user provided entrypoint if need be
	result_command="${result_command}
		--entrypoint /usr/bin/entrypoint
		${container_image}
		--verbose
		--name \"${container_user_name}\"
		--user ${container_user_uid}
		--group ${container_user_gid}
		--home \"${container_user_custom_home:-"${container_user_home}"}\"
		--init \"${init}\"
		--nvidia \"${nvidia}\"
		--pre-init-hooks \"${container_pre_init_hook}\"
		--additional-packages \"${container_additional_packages}\"
		-- '${container_init_hook}'
		"
	# use container_user_custom_home if defined, else fallback to normal home.
	# Return generated command.
	printf "%s" "${result_command}"
}
# Dry run mode: just generate the create command and print it to stdout.
# No container is created and no image is pulled.
if [ "${dryrun}" -ne 0 ]; then
	# When cloning, the image to use is the clone source, not the default.
	if [ -n "${container_clone}" ]; then
		container_image="${container_clone}"
	fi
	cmd="$(generate_create_command)"
	# Use printf instead of echo here: the generated command contains
	# backslash-escaped quotes, and the echo builtin of some POSIX shells
	# (e.g. dash) interprets backslash escapes, mangling the output.
	# Strip the tabs used for pretty-printing the multiline command.
	cmd="$(printf "%s\n" "${cmd}" | sed 's/\t//g')"
	printf "%s\n" "${cmd}"
	exit 0
fi
# Check if a container with this name already exists.
# If it does, print how to enter it and exit successfully.
if ${container_manager} inspect --type container "${container_name}" > /dev/null 2>&1; then
	printf "Distrobox named '%s' already exists.\n" "${container_name}"
	printf "To enter, run:\n\n"
	# Rootless container, or we are already logged in as root:
	# a plain enter is enough. Otherwise (rootful container entered by a
	# non-root user) the --root flag is needed.
	if [ "${container_user_uid}" -eq 0 ] || [ "${rootful}" -eq 0 ]; then
		printf "distrobox enter %s\n\n" "${container_name}"
	else
		printf "distrobox enter --root %s\n\n" "${container_name}"
	fi
	exit 0
fi
# When the clone flag is in use, point container_image at the result of
# duplicating the source container.
if [ -n "${container_clone}" ]; then
	# Cloning relies on manager-specific commit/duplicate features, so it
	# only works with podman or docker.
	case "${container_manager}" in
		*podman* | *docker*) ;;
		*)
			printf >&2 "ERROR: clone is only supported with docker and podman\n"
			exit 127
			;;
	esac
	container_image="$(get_clone_image)"
fi
# Pull the image when always-pull is requested, or when it is not already
# present on the host. Ask the user first unless running non-interactively.
if [ "${container_always_pull}" -eq 1 ] ||
	! ${container_manager} inspect --type image "${container_image}" > /dev/null 2>&1; then
	# Decide whether we can pull without prompting.
	if [ "${non_interactive}" -eq 1 ] || [ "${container_always_pull}" -eq 1 ]; then
		answer="yes"
	else
		# Interactive: ask, defaulting to yes on empty input.
		printf >&2 "Image %s not found.\n" "${container_image}"
		printf >&2 "Do you want to pull the image now? [Y/n]: "
		read -r answer
		if [ -z "${answer}" ]; then
			answer="Y"
		fi
	fi
	# Accept only y,Y,Yes,yes,n,N,No,no.
	case "${answer}" in
		y | Y | Yes | yes | YES)
			# Go ahead and pull the image.
			# shellcheck disable=SC2086
			${container_manager} pull ${container_platform} "${container_image}"
			;;
		n | N | No | no | NO)
			# Declined: tell the user how to pull manually, then bail out.
			printf >&2 "next time, run this command first:\n"
			printf >&2 "\t%s pull %s\n" "${container_manager}" "${container_image}"
			exit 0
			;;
		*)
			# Anything else is a hard error.
			printf >&2 "Invalid input.\n"
			printf >&2 "The available choices are: y,Y,Yes,yes,YES or n,N,No,no,NO.\nExiting.\n"
			exit 1
			;;
	esac
fi
# Generate the create command and run it.
printf >&2 "Creating '%s' using image %s\t" "${container_name}" "${container_image}"
create_cmd="$(generate_create_command)"
# Eval the generated command. If successful, display a helpful message.
# shellcheck disable=SC2086
if eval ${create_cmd} > /dev/null; then
	printf >&2 "\033[32m [ OK ]\n\033[0mDistrobox '%s' successfully created.\n" "${container_name}"
	printf >&2 "To enter, run:\n\n"
	# Rootless container, or we are already logged in as root:
	# a plain enter is enough. Otherwise suggest the --root flag.
	if [ "${container_user_uid}" -eq 0 ] || [ "${rootful}" -eq 0 ]; then
		printf "distrobox enter %s\n\n" "${container_name}"
	else
		printf "distrobox enter --root %s\n\n" "${container_name}"
	fi
	# The box exists now; generate the desktop entry as well, but only for
	# rootless containers and only if entry generation was not disabled.
	if [ "${rootful}" -eq 0 ] && [ "${container_generate_entry}" -ne 0 ]; then
		"${distrobox_genentry_path}" "${container_name}"
	fi
else
	# Preserve the create command's exit status and propagate it.
	error="$?"
	printf >&2 "\033[31m [ ERR ]\033[0m failed to create container.\n"
	exit "${error}"
fi