diff --git a/.devcontainer.json b/.devcontainer/devcontainer.json similarity index 77% rename from .devcontainer.json rename to .devcontainer/devcontainer.json index c24649ab6..db939da5b 100644 --- a/.devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -1,18 +1,17 @@ { - "image": "mcr.microsoft.com/vscode/devcontainers/javascript-node:0-18", - "remoteUser": "node", - "customizations": { - "vscode": { - "extensions": [ - "mads-hartmann.bash-ide-vscode" - ] - } - }, + "image": "mcr.microsoft.com/devcontainers/javascript-node:3-22", "features": { "ghcr.io/devcontainers/features/docker-in-docker:2": {} }, "postCreateCommand": "npm install -g @devcontainers/cli", "hostRequirements": { "cpus": 4 + }, + "customizations": { + "vscode": { + "extensions": [ + "mads-hartmann.bash-ide-vscode" + ] + } } } diff --git a/.github/workflows/docker-in-docker-stress-test.yaml b/.github/workflows/docker-in-docker-stress-test.yaml index 569f94699..1c9410a88 100644 --- a/.github/workflows/docker-in-docker-stress-test.yaml +++ b/.github/workflows/docker-in-docker-stress-test.yaml @@ -13,7 +13,7 @@ jobs: fail-fast: false runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: "Install latest devcontainer CLI" run: npm install -g @devcontainers/cli @@ -28,7 +28,7 @@ jobs: fail-fast: false runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: "Install latest devcontainer CLI" run: npm install -g @devcontainers/cli diff --git a/.github/workflows/linter-automated.yaml b/.github/workflows/linter-automated.yaml index 98921bf68..46cbd085a 100644 --- a/.github/workflows/linter-automated.yaml +++ b/.github/workflows/linter-automated.yaml @@ -1,5 +1,5 @@ name: "CI - Shell Script Linter" -on: +on: push: branches: - main @@ -9,7 +9,7 @@ jobs: shellchecker: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Shell Linter uses: azohra/shell-linter@v0.6.0 diff --git 
a/.github/workflows/linter-manual.yaml b/.github/workflows/linter-manual.yaml index ec2e5f0a7..019724d35 100644 --- a/.github/workflows/linter-manual.yaml +++ b/.github/workflows/linter-manual.yaml @@ -15,7 +15,7 @@ jobs: shellchecker: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Shell Linter uses: azohra/shell-linter@v0.6.0 diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml index 485096b5f..6890f5ae5 100644 --- a/.github/workflows/release.yaml +++ b/.github/workflows/release.yaml @@ -9,13 +9,16 @@ jobs: deploy: if: ${{ github.ref == 'refs/heads/main' }} runs-on: ubuntu-latest + permissions: + packages: write + contents: write steps: - - uses: actions/checkout@v3 - + - uses: actions/checkout@v4 + - name: "Publish" uses: devcontainers/action@v1 with: publish-features: "true" base-path-to-features: "./src" env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/test-all.yaml b/.github/workflows/test-all.yaml index 40f5efa75..7e4c343bd 100644 --- a/.github/workflows/test-all.yaml +++ b/.github/workflows/test-all.yaml @@ -48,9 +48,10 @@ jobs: "debian:12", "mcr.microsoft.com/devcontainers/base:ubuntu", "mcr.microsoft.com/devcontainers/base:debian", + "mcr.microsoft.com/devcontainers/base:noble" ] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: "Install latest devcontainer CLI" run: npm install -g @devcontainers/cli @@ -93,7 +94,7 @@ jobs: "nix", ] steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: "Install latest devcontainer CLI" run: npm install -g @devcontainers/cli @@ -105,7 +106,7 @@ jobs: runs-on: ubuntu-latest continue-on-error: true steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: "Install latest devcontainer CLI" run: npm install -g @devcontainers/cli diff --git a/.github/workflows/test-manual.yaml 
b/.github/workflows/test-manual.yaml index f163eb8ea..10d099b25 100644 --- a/.github/workflows/test-manual.yaml +++ b/.github/workflows/test-manual.yaml @@ -19,7 +19,7 @@ jobs: test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: "Install latest devcontainer CLI" run: npm install -g @devcontainers/cli diff --git a/.github/workflows/test-pr.yaml b/.github/workflows/test-pr.yaml index d60733574..c5930d97e 100644 --- a/.github/workflows/test-pr.yaml +++ b/.github/workflows/test-pr.yaml @@ -8,7 +8,7 @@ jobs: outputs: features: ${{ steps.filter.outputs.changes }} steps: - - uses: dorny/paths-filter@v2 + - uses: dorny/paths-filter@v3 id: filter with: filters: | @@ -17,7 +17,7 @@ jobs: azure-cli: ./**/azure-cli/** common-utils: ./**/common-utils/** conda: ./**/conda/** - desktop-lite: ./**/desktop-lite/** + desktop-lite: ./**/desktop-lite/** docker-outside-of-docker: ./**/docker-outside-of-docker/** docker-in-docker: ./**/docker-in-docker/** dotnet: ./**/dotnet/** @@ -55,9 +55,19 @@ jobs: "debian:12", "mcr.microsoft.com/devcontainers/base:ubuntu", "mcr.microsoft.com/devcontainers/base:debian", + "mcr.microsoft.com/devcontainers/base:noble" ] + exclude: + - features: oryx + baseImage: ubuntu:jammy + - features: oryx + baseImage: mcr.microsoft.com/devcontainers/base:ubuntu + - features: docker-in-docker + baseImage: mcr.microsoft.com/devcontainers/base:debian + - features: docker-outside-of-docker + baseImage: mcr.microsoft.com/devcontainers/base:debian steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: "Install latest devcontainer CLI" run: npm install -g @devcontainers/cli @@ -67,13 +77,13 @@ jobs: test-scenarios: needs: [detect-changes] - runs-on: ubuntu-latest + runs-on: devcontainer-image-builder-ubuntu continue-on-error: true strategy: matrix: features: ${{ fromJSON(needs.detect-changes.outputs.features) }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: "Install 
latest devcontainer CLI" run: npm install -g @devcontainers/cli diff --git a/.github/workflows/update-aws-cli-completer-scripts.yml b/.github/workflows/update-aws-cli-completer-scripts.yml new file mode 100644 index 000000000..41d67189f --- /dev/null +++ b/.github/workflows/update-aws-cli-completer-scripts.yml @@ -0,0 +1,58 @@ +name: "Updates vendor 'aws_bash_completer' and 'aws_zsh_completer.sh' scripts" +on: + workflow_dispatch: + schedule: + - cron: '0 0 * * 0' # Runs every Sunday at midnight UTC (adjust as needed) + +jobs: + fetch-latest-aws-completer-scripts: + runs-on: ubuntu-latest + environment: documentation # grants access to secrets.PAT, for creating pull requests + permissions: + contents: write + pull-requests: write + steps: + - uses: actions/checkout@v4 + + - name: Run fetch-latest-completer-scripts.sh + run: src/aws-cli/scripts/fetch-latest-completer-scripts.sh + + - name: Create a PR for completer scripts + id: push_image_info + env: + GITHUB_TOKEN: ${{ secrets.PAT }} + run: | + set -e + echo "Start." 
+ + # Configure git and Push updates + git config --global user.email github-actions@github.com + git config --global user.name github-actions + git config pull.rebase false + + branch=automated-script-update-$GITHUB_RUN_ID + git checkout -b $branch + message='[Updates] Automated vendor 'aws-cli' completer scripts' + + # Add / update and commit + git add src/aws-cli/scripts/vendor/aws_bash_completer + git add src/aws-cli/scripts/vendor/aws_zsh_completer.sh + + git commit -m 'Automated completer scripts update' || export NO_UPDATES=true + + # Bump version and push + if [ "$NO_UPDATES" != "true" ] ; then + echo "$(jq --indent 4 '.version = (.version | split(".") | map(tonumber) | .[2] += 1 | join("."))' src/aws-cli/devcontainer-feature.json)" > src/aws-cli/devcontainer-feature.json + git add src/aws-cli/devcontainer-feature.json + + git commit -m 'Bump version' + git push origin "$branch" + gh api \ + --method POST \ + -H "Accept: application/vnd.github+json" \ + /repos/${GITHUB_REPOSITORY}/pulls \ + -f title="$message" \ + -f body="$message" \ + -f head="$branch" \ + -f base="$GITHUB_REF_NAME" + fi diff --git a/.github/workflows/update-documentation.yml b/.github/workflows/update-documentation.yml index 6976e62ea..d74d970d5 100644 --- a/.github/workflows/update-documentation.yml +++ b/.github/workflows/update-documentation.yml @@ -9,9 +9,12 @@ jobs: generate: runs-on: ubuntu-latest environment: documentation + permissions: + contents: write + pull-requests: write if: "github.ref == 'refs/heads/main'" steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Generate Documentation uses: devcontainers/action@v1 diff --git a/.github/workflows/update-dotnet-install-script.yml b/.github/workflows/update-dotnet-install-script.yml index 56adf854d..604f6880b 100644 --- a/.github/workflows/update-dotnet-install-script.yml +++ b/.github/workflows/update-dotnet-install-script.yml @@ -7,8 +7,12 @@ on: jobs: fetch-latest-dotnet-install: runs-on: ubuntu-latest + 
environment: documentation # grants access to secrets.PAT, for creating pull requests + permissions: + contents: write + pull-requests: write steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Run fetch-latest-dotnet-install.sh run: src/dotnet/scripts/fetch-latest-dotnet-install.sh @@ -21,6 +25,9 @@ jobs: set -e echo "Start." + # Update dotnet-install for Oryx Feature as well + cp src/dotnet/scripts/vendor/dotnet-install.sh src/oryx/scripts/vendor/dotnet-install.sh + # Configure git and Push updates git config --global user.email github-actions@github.com git config --global user.name github-actions @@ -32,10 +39,19 @@ jobs: # Add / update and commit git add src/dotnet/scripts/vendor/dotnet-install.sh + git add src/oryx/scripts/vendor/dotnet-install.sh + git commit -m 'Automated dotnet-install script update' || export NO_UPDATES=true - # Push + # Bump version and push if [ "$NO_UPDATES" != "true" ] ; then + echo "$(jq --indent 4 '.version = (.version | split(".") | map(tonumber) | .[2] += 1 | join("."))' src/dotnet/devcontainer-feature.json)" > src/dotnet/devcontainer-feature.json + git add src/dotnet/devcontainer-feature.json + + echo "$(jq --indent 4 '.version = (.version | split(".") | map(tonumber) | .[2] += 1 | join("."))' src/oryx/devcontainer-feature.json)" > src/oryx/devcontainer-feature.json + git add src/oryx/devcontainer-feature.json + + git commit -m 'Bump version' git push origin "$branch" gh api \ --method POST \ diff --git a/.github/workflows/validate-metadata-files.yml b/.github/workflows/validate-metadata-files.yml index dbba8e2b4..863418e93 100644 --- a/.github/workflows/validate-metadata-files.yml +++ b/.github/workflows/validate-metadata-files.yml @@ -7,8 +7,8 @@ jobs: validate: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - + - uses: actions/checkout@v4 + - name: "Validate devcontainer-feature.json files" uses: devcontainers/action@v1 with: diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 
000000000..034e84803 --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,21 @@ +# Security Policy + +## Supported Versions + +Use this section to tell people about which versions of your project are +currently being supported with security updates. + +| Version | Supported | +| ------- | ------------------ | +| 5.1.x | :white_check_mark: | +| 5.0.x | :x: | +| 4.0.x | :white_check_mark: | +| < 4.0 | :x: | + +## Reporting a Vulnerability + +Use this section to tell people how to report a vulnerability. + +Tell them where to go, how often they can expect to get an update on a +reported vulnerability, what to expect if the vulnerability is accepted or +declined, etc. diff --git a/src/anaconda/NOTES.md b/src/anaconda/NOTES.md index 394dd6f1d..4a372cade 100644 --- a/src/anaconda/NOTES.md +++ b/src/anaconda/NOTES.md @@ -17,4 +17,7 @@ conda install python=3.7 This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. +Also RHEL based linux distributions such as almalinux, rockylinux, fedora are supported now. +Please do note that Alpine and cbl-mariner aren't supported due system level restrictions with the anaconda installer. + `bash` is required to execute the `install.sh` script. diff --git a/src/anaconda/README.md b/src/anaconda/README.md index 3346823b4..f093ce618 100644 --- a/src/anaconda/README.md +++ b/src/anaconda/README.md @@ -35,7 +35,7 @@ conda install python=3.7 ## OS Support This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. - +Also RHEL based linux distributions such as almalinux, rockylinux, fedora are supported now. `bash` is required to execute the `install.sh` script. 
diff --git a/src/anaconda/devcontainer-feature.json b/src/anaconda/devcontainer-feature.json index e0d013d86..ac76a9b99 100644 --- a/src/anaconda/devcontainer-feature.json +++ b/src/anaconda/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "anaconda", - "version": "1.0.11", + "version": "1.1.0", "name": "Anaconda", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/anaconda", "options": { @@ -17,6 +17,17 @@ "CONDA_DIR": "/usr/local/conda", "PATH": "/usr/local/conda/bin:${PATH}" }, + "customizations": { + "vscode": { + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes Anaconda and the conda package manager pre-installed and available on the `PATH` for data science and Python development. Additional packages installed using Conda will be downloaded from Anaconda or another repository configured by the user. A user can install different versions of Python than the one in this dev container by running a command like: conda install python=3.7" + } + ] + } + } + }, "installsAfter": [ "ghcr.io/devcontainers/features/common-utils" ] diff --git a/src/anaconda/install.sh b/src/anaconda/install.sh index 53c606a1e..6f57a3144 100755 --- a/src/anaconda/install.sh +++ b/src/anaconda/install.sh @@ -13,11 +13,63 @@ USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}" UPDATE_RC="${UPDATE_RC:-"true"}" CONDA_DIR="${CONDA_DIR:-"/usr/local/conda"}" -set -eux +set -euo pipefail export DEBIAN_FRONTEND=noninteractive +# Detect package manager and set install command +detect_package_manager() { + if command -v apt-get > /dev/null; then + PKG_MANAGER="apt-get" + PKG_UPDATE="apt-get update -y" + PKG_INSTALL="apt-get -y install --no-install-recommends" + PKG_CLEAN="apt-get -y clean" + PKG_LISTS="/var/lib/apt/lists/*" + PKG_QUERY="dpkg -s" + elif command -v apk > /dev/null; then + PKG_MANAGER="apk" + PKG_UPDATE="apk update" + PKG_INSTALL="apk add --no-cache" + PKG_CLEAN="rm -rf /var/cache/apk/*" + 
PKG_LISTS="/var/cache/apk/*" + PKG_QUERY="apk info -e" + elif command -v dnf > /dev/null; then + PKG_MANAGER="dnf" + PKG_UPDATE="dnf -y makecache" + PKG_INSTALL="dnf -y install" + PKG_CLEAN="dnf clean all" + PKG_LISTS="/var/cache/dnf/*" + PKG_QUERY="rpm -q" + elif command -v microdnf > /dev/null; then + PKG_MANAGER="microdnf" + PKG_UPDATE="microdnf update" + PKG_INSTALL="microdnf install -y" + PKG_CLEAN="microdnf clean all" + PKG_LISTS="/var/cache/yum/*" + PKG_QUERY="rpm -q" + elif command -v tdnf > /dev/null; then + PKG_MANAGER="tdnf" + PKG_UPDATE="tdnf makecache" + PKG_INSTALL="tdnf install -y" + PKG_CLEAN="tdnf clean all" + PKG_LISTS="/var/cache/tdnf/*" + PKG_QUERY="rpm -q" + elif command -v yum > /dev/null; then + PKG_MANAGER="yum" + PKG_UPDATE="yum -y makecache" + PKG_INSTALL="yum -y install" + PKG_CLEAN="yum clean all" + PKG_LISTS="/var/cache/yum/*" + PKG_QUERY="rpm -q" + else + echo "No supported package manager found (apt-get, apk, dnf, microdnf, tdnf, yum)." + exit 1 + fi +} + +detect_package_manager + # Clean up -rm -rf /var/lib/apt/lists/* +eval "$PKG_CLEAN" if [ "$(id -u)" -ne 0 ]; then echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' @@ -47,7 +99,12 @@ elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then fi architecture="$(uname -m)" -if [ "${architecture}" != "x86_64" ]; then +# Normalize arm64 to aarch64 for consistency +if [ "${architecture}" = "arm64" ]; then + architecture="aarch64" +fi + +if [ "${architecture}" != "x86_64" ] && [ "${architecture}" != "aarch64" ]; then echo "(!) Architecture $architecture unsupported" exit 1 fi @@ -66,12 +123,76 @@ updaterc() { # Checks if packages are installed and installs them if not check_packages() { - if ! dpkg -s "$@" > /dev/null 2>&1; then - if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then - echo "Running apt-get update..." 
- apt-get update -y + for pkg in "$@"; do + # Use PKG_QUERY variable to check if package is installed + if ! eval "$PKG_QUERY $pkg" > /dev/null 2>&1; then + # Package not installed, check if we need to update package lists + if [ "$PKG_MANAGER" = "apt-get" ]; then + # For apt-get, check if package lists are empty + if [ "$(find "$PKG_LISTS" | wc -l)" = "0" ]; then + echo "Running $PKG_UPDATE..." + eval "$PKG_UPDATE" + fi + else + # For other package managers, always update before installing + echo "Running $PKG_UPDATE..." + eval "$PKG_UPDATE" + fi + + # Install the package + echo "Installing package: $pkg" + eval "$PKG_INSTALL $pkg" + else + echo "Package $pkg is already installed" fi - apt-get -y install --no-install-recommends "$@" + done +} + +sudo_if() { + COMMAND="$*" + if [ "$(id -u)" -eq 0 ] && [ "$USERNAME" != "root" ]; then + if command -v runuser > /dev/null; then + runuser -l "$USERNAME" -c "$COMMAND" + elif command -v su > /dev/null; then + su - "$USERNAME" -c "$COMMAND" + elif command -v sudo > /dev/null; then + sudo -u "$USERNAME" -i bash -c "$COMMAND" + else + # Fallback: execute as root (not ideal but works in containers) + echo "Warning: No user switching command available, running as root" + eval "$COMMAND" + fi + else + eval "$COMMAND" + fi +} + +run_as_user() { + local user="$1" + shift + local cmd="$*" + + if command -v runuser > /dev/null; then + if [ "$PKG_MANAGER" = "apk" ]; then + runuser "$user" -c "$cmd" + else + runuser -l "$user" -c "$cmd" + fi + elif command -v su > /dev/null; then + if [ "$PKG_MANAGER" = "apk" ]; then + su "$user" -c "$cmd" + else + su --login -c "$cmd" "$user" + fi + elif command -v sudo > /dev/null; then + if [ "$PKG_MANAGER" = "apk" ]; then + sudo -u "$user" sh -c "$cmd" + else + sudo -u "$user" -i bash -c "$cmd" + fi + else + echo "Warning: No user switching command available, running as root" + eval "$cmd" fi } @@ -83,29 +204,46 @@ if ! 
conda --version &> /dev/null ; then usermod -a -G conda "${USERNAME}" # Install dependencies - check_packages wget ca-certificates + if [ "$PKG_MANAGER" = "apt-get" ]; then + check_packages wget ca-certificates libgtk-3-0 + elif [ "$PKG_MANAGER" = "apk" ]; then + check_packages wget ca-certificates gtk+3.0 + else + check_packages wget ca-certificates gtk3 + fi mkdir -p $CONDA_DIR + chown -R "${USERNAME}:conda" "${CONDA_DIR}" - chmod -R g+r+w "${CONDA_DIR}" - - find "${CONDA_DIR}" -type d -print0 | xargs -n 1 -0 chmod g+s + chmod -R g+r+w "${CONDA_DIR}" + echo "Installing Anaconda..." CONDA_VERSION=$VERSION if [ "${VERSION}" = "latest" ] || [ "${VERSION}" = "lts" ]; then - CONDA_VERSION="2021.11" + CONDA_VERSION="2024.10-1" fi - su --login -c "wget -q https://repo.anaconda.com/archive/Anaconda3-${CONDA_VERSION}-Linux-x86_64.sh -O /tmp/anaconda-install.sh \ - && /bin/bash /tmp/anaconda-install.sh -u -b -p ${CONDA_DIR}" ${USERNAME} 2>&1 + if [ "${architecture}" = "x86_64" ]; then + run_as_user "${USERNAME}" "export http_proxy=${http_proxy:-} && export https_proxy=${https_proxy:-} \ + && wget -q https://repo.anaconda.com/archive/Anaconda3-${CONDA_VERSION}-Linux-x86_64.sh -O /tmp/anaconda-install.sh \ + && /bin/bash /tmp/anaconda-install.sh -u -b -p ${CONDA_DIR}" + elif [ "${architecture}" = "aarch64" ]; then + run_as_user "${USERNAME}" "export http_proxy=${http_proxy:-} && export https_proxy=${https_proxy:-} \ + && wget -q https://repo.anaconda.com/archive/Anaconda3-${CONDA_VERSION}-Linux-aarch64.sh -O /tmp/anaconda-install.sh \ + && /bin/bash /tmp/anaconda-install.sh -u -b -p ${CONDA_DIR}" + fi if [ "${VERSION}" = "latest" ] || [ "${VERSION}" = "lts" ]; then PATH=$PATH:${CONDA_DIR}/bin conda update -y conda fi - rm /tmp/anaconda-install.sh + chown -R "${USERNAME}:conda" "${CONDA_DIR}" + chmod -R g+r+w "${CONDA_DIR}" + + + rm /tmp/anaconda-install.sh updaterc "export CONDA_DIR=${CONDA_DIR}/bin" fi @@ -134,7 +272,6 @@ if [ -f "/etc/bash.bashrc" ]; then echo 
"${notice_script}" | tee -a /etc/bash.bashrc fi -# Clean up -rm -rf /var/lib/apt/lists/* - +# Final clean up +eval "$PKG_CLEAN" echo "Done!" diff --git a/src/aws-cli/devcontainer-feature.json b/src/aws-cli/devcontainer-feature.json index 8de431868..54cc4b29b 100644 --- a/src/aws-cli/devcontainer-feature.json +++ b/src/aws-cli/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "aws-cli", - "version": "1.0.6", + "version": "1.1.2", "name": "AWS CLI", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/aws-cli", "description": "Installs the AWS CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.", @@ -18,7 +18,14 @@ "vscode": { "extensions": [ "AmazonWebServices.aws-toolkit-vscode" - ] + ], + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes the AWS CLI along with needed dependencies pre-installed and available on the `PATH`, along with the AWS Toolkit extensions for AWS development." + } + ] + } } }, "installsAfter": [ diff --git a/src/aws-cli/install.sh b/src/aws-cli/install.sh index a8b414ee6..4ff9bfde6 100755 --- a/src/aws-cli/install.sh +++ b/src/aws-cli/install.sh @@ -50,48 +50,29 @@ if [ "$(id -u)" -ne 0 ]; then exit 1 fi -# Get central common setting -get_common_setting() { - if [ "${common_settings_file_loaded}" != "true" ]; then - curl -sfL "https://aka.ms/vscode-dev-containers/script-library/settings.env" 2>/dev/null -o /tmp/vsdc-settings.env || echo "Could not download settings file. Skipping." - common_settings_file_loaded=true - fi - if [ -f "/tmp/vsdc-settings.env" ]; then - local multi_line="" - if [ "$2" = "true" ]; then multi_line="-z"; fi - local result="$(grep ${multi_line} -oP "$1=\"?\K[^\"]+" /tmp/vsdc-settings.env | tr -d '\0')" - if [ ! -z "${result}" ]; then declare -g $1="${result}"; fi - fi - echo "$1=${!1}" -} - apt_get_update() { - echo "Running apt-get update..." 
- apt-get update -y + if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then + echo "Running apt-get update..." + apt-get update -y + fi } # Checks if packages are installed and installs them if not check_packages() { if ! dpkg -s "$@" > /dev/null 2>&1; then - if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then - echo "Running apt-get update..." - apt-get update -y - fi + apt_get_update apt-get -y install --no-install-recommends "$@" fi } export DEBIAN_FRONTEND=noninteractive -check_packages curl ca-certificates gnupg2 dirmngr unzip +check_packages curl ca-certificates gpg dirmngr unzip bash-completion less verify_aws_cli_gpg_signature() { local filePath=$1 local sigFilePath=$2 - - get_common_setting AWSCLI_GPG_KEY - get_common_setting AWSCLI_GPG_KEY_MATERIAL true local awsGpgKeyring=aws-cli-public-key.gpg echo "${AWSCLI_GPG_KEY_MATERIAL}" | gpg --dearmor > "./${awsGpgKeyring}" @@ -132,6 +113,17 @@ install() { unzip "${scriptZipFile}" ./aws/install + # kubectl bash completion + mkdir -p /etc/bash_completion.d + cp ./scripts/vendor/aws_bash_completer /etc/bash_completion.d/aws + + # kubectl zsh completion + if [ -e "${USERHOME}/.oh-my-zsh" ]; then + mkdir -p "${USERHOME}/.oh-my-zsh/completions" + cp ./scripts/vendor/aws_zsh_completer.sh "${USERHOME}/.oh-my-zsh/completions/_aws" + chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh" + fi + rm -rf ./aws } diff --git a/src/aws-cli/scripts/fetch-latest-completer-scripts.sh b/src/aws-cli/scripts/fetch-latest-completer-scripts.sh new file mode 100755 index 000000000..47dc6ee1d --- /dev/null +++ b/src/aws-cli/scripts/fetch-latest-completer-scripts.sh @@ -0,0 +1,20 @@ +#!/bin/bash +#------------------------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. 
+#------------------------------------------------------------------------------------------------------------- +# +# Docs: https://github.com/devcontainers/features/tree/main/src/aws-cli +# Maintainer: The Dev Container spec maintainers +# +# Run this script to replace aws_bash_completer and aws_zsh_completer.sh with the latest and greatest available version +# +COMPLETER_SCRIPTS=$(dirname "${BASH_SOURCE[0]}") +BASH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_bash_completer" +ZSH_COMPLETER_SCRIPT="$COMPLETER_SCRIPTS/vendor/aws_zsh_completer.sh" + +wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_bash_completer -O "$BASH_COMPLETER_SCRIPT" +chmod +x "$BASH_COMPLETER_SCRIPT" + +wget https://raw.githubusercontent.com/aws/aws-cli/v2/bin/aws_zsh_completer.sh -O "$ZSH_COMPLETER_SCRIPT" +chmod +x "$ZSH_COMPLETER_SCRIPT" diff --git a/src/aws-cli/scripts/vendor/README.md b/src/aws-cli/scripts/vendor/README.md new file mode 100644 index 000000000..1f3838d46 --- /dev/null +++ b/src/aws-cli/scripts/vendor/README.md @@ -0,0 +1,12 @@ +### **IMPORTANT NOTE** + +Scripts in this directory are sourced externally and not maintained by the Dev Container spec maintainers. Do not make changes directly as they might be overwritten at any moment. + +## aws_bash_completer + +`aws_bash_completer` is a copy of . + +## aws_zsh_completer.sh + +`aws_zsh_completer.sh` is a copy of . 
+ diff --git a/src/aws-cli/scripts/vendor/aws_bash_completer b/src/aws-cli/scripts/vendor/aws_bash_completer new file mode 100755 index 000000000..32cd1dd8f --- /dev/null +++ b/src/aws-cli/scripts/vendor/aws_bash_completer @@ -0,0 +1,6 @@ +# Typically that would be added under one of the following paths: +# - /etc/bash_completion.d +# - /usr/local/etc/bash_completion.d +# - /usr/share/bash-completion/completions + +complete -C aws_completer aws diff --git a/src/aws-cli/scripts/vendor/aws_zsh_completer.sh b/src/aws-cli/scripts/vendor/aws_zsh_completer.sh new file mode 100755 index 000000000..c1b2c1244 --- /dev/null +++ b/src/aws-cli/scripts/vendor/aws_zsh_completer.sh @@ -0,0 +1,60 @@ +# Source this file to activate auto completion for zsh using the bash +# compatibility helper. Make sure to run `compinit` before, which should be +# given usually. +# +# % source /path/to/zsh_complete.sh +# +# Typically that would be called somewhere in your .zshrc. +# +# Note, the overwrite of _bash_complete() is to export COMP_LINE and COMP_POINT +# That is only required for zsh <= edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570 +# +# https://github.com/zsh-users/zsh/commit/edab1d3dbe61da7efe5f1ac0e40444b2ec9b9570 +# +# zsh releases prior to that version do not export the required env variables! + +autoload -Uz bashcompinit +bashcompinit -i + +_bash_complete() { + local ret=1 + local -a suf matches + local -x COMP_POINT COMP_CWORD + local -a COMP_WORDS COMPREPLY BASH_VERSINFO + local -x COMP_LINE="$words" + local -A savejobstates savejobtexts + + (( COMP_POINT = 1 + ${#${(j. 
.)words[1,CURRENT]}} + $#QIPREFIX + $#IPREFIX + $#PREFIX )) + (( COMP_CWORD = CURRENT - 1)) + COMP_WORDS=( $words ) + BASH_VERSINFO=( 2 05b 0 1 release ) + + savejobstates=( ${(kv)jobstates} ) + savejobtexts=( ${(kv)jobtexts} ) + + [[ ${argv[${argv[(I)nospace]:-0}-1]} = -o ]] && suf=( -S '' ) + + matches=( ${(f)"$(compgen $@ -- ${words[CURRENT]})"} ) + + if [[ -n $matches ]]; then + if [[ ${argv[${argv[(I)filenames]:-0}-1]} = -o ]]; then + compset -P '*/' && matches=( ${matches##*/} ) + compset -S '/*' && matches=( ${matches%%/*} ) + compadd -Q -f "${suf[@]}" -a matches && ret=0 + else + compadd -Q "${suf[@]}" -a matches && ret=0 + fi + fi + + if (( ret )); then + if [[ ${argv[${argv[(I)default]:-0}-1]} = -o ]]; then + _default "${suf[@]}" && ret=0 + elif [[ ${argv[${argv[(I)dirnames]:-0}-1]} = -o ]]; then + _directories "${suf[@]}" && ret=0 + fi + fi + + return ret +} + +complete -C aws_completer aws diff --git a/src/azure-cli/README.md b/src/azure-cli/README.md index 2e7b6d39b..217019c51 100644 --- a/src/azure-cli/README.md +++ b/src/azure-cli/README.md @@ -18,6 +18,7 @@ Installs the Azure CLI along with needed dependencies. Useful for base Dockerfil | version | Select or enter an Azure CLI version. (Available versions may vary by Linux distribution.) | string | latest | | extensions | Optional comma separated list of Azure CLI extensions to install in profile. | string | - | | installBicep | Optionally install Azure Bicep | boolean | false | +| bicepVersion | Select or enter a Bicep version. 
('latest' or a specic version such as 'v0.31.92') | string | latest | | installUsingPython | Install Azure CLI using Python instead of pipx | boolean | false | ## Customizations diff --git a/src/azure-cli/devcontainer-feature.json b/src/azure-cli/devcontainer-feature.json index 3d1c0dfa3..3bc905f47 100644 --- a/src/azure-cli/devcontainer-feature.json +++ b/src/azure-cli/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "azure-cli", - "version": "1.2.1", + "version": "1.2.8", "name": "Azure CLI", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/azure-cli", "description": "Installs the Azure CLI along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.", @@ -23,6 +23,14 @@ "description": "Optionally install Azure Bicep", "default": false }, + "bicepVersion": { + "type": "string", + "proposals": [ + "latest" + ], + "default": "latest", + "description": "Select or enter a Bicep version. ('latest' or a specic version such as 'v0.31.92')" + }, "installUsingPython": { "type": "boolean", "description": "Install Azure CLI using Python instead of pipx", @@ -33,7 +41,14 @@ "vscode": { "extensions": [ "ms-vscode.azurecli" - ] + ], + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes the Azure CLI along with needed dependencies pre-installed and available on the `PATH`, along with the Azure CLI extension for Azure development." 
+ } + ] + } } }, "installsAfter": [ diff --git a/src/azure-cli/install.sh b/src/azure-cli/install.sh index 51759488d..171c24510 100755 --- a/src/azure-cli/install.sh +++ b/src/azure-cli/install.sh @@ -15,10 +15,11 @@ rm -rf /var/lib/apt/lists/* AZ_VERSION=${VERSION:-"latest"} AZ_EXTENSIONS=${EXTENSIONS} AZ_INSTALLBICEP=${INSTALLBICEP:-false} -INSTALL_USING_PYTHON=${INSTALL_USING_PYTHON:-true} +AZ_BICEPVERSION=${BICEPVERSION:-latest} +INSTALL_USING_PYTHON=${INSTALLUSINGPYTHON:-false} MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc" -AZCLI_ARCHIVE_ARCHITECTURES="amd64" -AZCLI_ARCHIVE_VERSION_CODENAMES="stretch buster bullseye bionic focal jammy" +AZCLI_ARCHIVE_ARCHITECTURES="amd64 arm64" +AZCLI_ARCHIVE_VERSION_CODENAMES="stretch bookworm buster bullseye bionic focal jammy noble trixie" if [ "$(id -u)" -ne 0 ]; then echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' @@ -32,21 +33,6 @@ fi echo "Effective REMOTE_USER: ${_REMOTE_USER}" -# Get central common setting -get_common_setting() { - if [ "${common_settings_file_loaded}" != "true" ]; then - curl -sfL "https://aka.ms/vscode-dev-containers/script-library/settings.env" 2>/dev/null -o /tmp/vsdc-settings.env || echo "Could not download settings file. Skipping." - common_settings_file_loaded=true - fi - if [ -f "/tmp/vsdc-settings.env" ]; then - local multi_line="" - if [ "$2" = "true" ]; then multi_line="-z"; fi - local result="$(grep ${multi_line} -oP "$1=\"?\K[^\"]+" /tmp/vsdc-settings.env | tr -d '\0')" - if [ ! -z "${result}" ]; then declare -g $1="${result}"; fi - fi - echo "$1=${!1}" -} - apt_get_update() { echo "Running apt-get update..." 
@@ -110,7 +96,6 @@ install_using_apt() { # Install dependencies check_packages apt-transport-https curl ca-certificates gnupg2 dirmngr # Import key safely (new 'signed-by' method rather than deprecated apt-key approach) and install - get_common_setting MICROSOFT_GPG_KEYS_URI curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/azure-cli/ ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/azure-cli.list apt-get update @@ -199,18 +184,20 @@ install_with_complete_python_installation() { export DEBIAN_FRONTEND=noninteractive -# See if we're on x86_64 and if so, install via apt-get, otherwise use pip3 +# See if we're on x86_64 or AARCH64 and if so, install via apt-get, otherwise use pip3 echo "(*) Installing Azure CLI..." . /etc/os-release architecture="$(dpkg --print-architecture)" CACHED_AZURE_VERSION="${AZ_VERSION}" # In case we need to fallback to pip and the apt path has modified the AZ_VERSION variable. 
-if [[ "${AZCLI_ARCHIVE_ARCHITECTURES}" = *"${architecture}"* ]] && [[ "${AZCLI_ARCHIVE_VERSION_CODENAMES}" = *"${VERSION_CODENAME}"* ]]; then - install_using_apt || use_pip="true" +if [ "${INSTALL_USING_PYTHON}" != "true" ]; then + if [[ "${AZCLI_ARCHIVE_ARCHITECTURES}" = *"${architecture}"* ]] && [[ "${AZCLI_ARCHIVE_VERSION_CODENAMES}" = *"${VERSION_CODENAME}"* ]]; then + install_using_apt || use_pip="true" + fi else use_pip="true" fi -if [ "${use_pip}" = "true" ]; then +if [ "${use_pip}" = "true" ]; then AZ_VERSION=${CACHED_AZURE_VERSION} install_using_pip_strategy @@ -243,10 +230,16 @@ if [ "${AZ_INSTALLBICEP}" = "true" ]; then # The `az bicep install --target-platform` could be a solution; however, linux-arm64 is not an allowed value for this argument yet # Manually installing Bicep and moving to the appropriate directory where az expects it to be + if [ "${AZ_BICEPVERSION}" = "latest" ]; then + bicep_download_path="https://github.com/Azure/bicep/releases/latest/download" + else + bicep_download_path="https://github.com/Azure/bicep/releases/download/${AZ_BICEPVERSION}" + fi + if [ "${architecture}" = "arm64" ]; then - curl -Lo bicep https://github.com/Azure/bicep/releases/latest/download/bicep-linux-arm64 + curl -Lo bicep ${bicep_download_path}/bicep-linux-arm64 else - curl -Lo bicep https://github.com/Azure/bicep/releases/latest/download/bicep-linux-x64 + curl -Lo bicep ${bicep_download_path}/bicep-linux-x64 fi chmod +x ./bicep diff --git a/src/common-utils/devcontainer-feature.json b/src/common-utils/devcontainer-feature.json index 869812be6..14850232d 100644 --- a/src/common-utils/devcontainer-feature.json +++ b/src/common-utils/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "common-utils", - "version": "2.1.3", + "version": "2.5.4", "name": "Common Utilities", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/common-utils", "description": "Installs a set of common command line utilities, Oh My Zsh!, and sets up a non-root 
user.", diff --git a/src/common-utils/install.sh b/src/common-utils/install.sh index 8f1ece4d7..a89490347 100755 --- a/src/common-utils/install.sh +++ b/src/common-utils/install.sh @@ -31,6 +31,8 @@ fi if [ "${ID}" = "alpine" ]; then apk add --no-cache bash fi - +if [ "${ID}" = "azurelinux" ]; then + tdnf install -y curl git +fi exec /bin/bash "$(dirname $0)/main.sh" "$@" exit $? diff --git a/src/common-utils/main.sh b/src/common-utils/main.sh index 4a048db71..68abe58c8 100644 --- a/src/common-utils/main.sh +++ b/src/common-utils/main.sh @@ -32,6 +32,7 @@ install_debian_packages() { if [ "${PACKAGES_ALREADY_INSTALLED}" != "true" ]; then package_list="${package_list} \ apt-utils \ + bash-completion \ openssh-client \ gnupg2 \ dirmngr \ @@ -48,6 +49,7 @@ install_debian_packages() { ca-certificates \ unzip \ bzip2 \ + xz-utils \ zip \ nano \ vim-tiny \ @@ -103,18 +105,22 @@ install_debian_packages() { # Needed for adding manpages-posix and manpages-posix-dev which are non-free packages in Debian if [ "${ADD_NON_FREE_PACKAGES}" = "true" ]; then - # Bring in variables from /etc/os-release like VERSION_CODENAME - sed -i -E "s/deb http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME} main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME} main contrib non-free/" /etc/apt/sources.list - sed -i -E "s/deb-src http:\/\/(deb|httredir)\.debian\.org\/debian ${VERSION_CODENAME} main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME} main contrib non-free/" /etc/apt/sources.list - sed -i -E "s/deb http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME}-updates main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME}-updates main contrib non-free/" /etc/apt/sources.list - sed -i -E "s/deb-src http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME}-updates main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME}-updates main contrib non-free/" /etc/apt/sources.list - sed -i "s/deb http:\/\/security\.debian\.org\/debian-security 
${VERSION_CODENAME}\/updates main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main contrib non-free/" /etc/apt/sources.list - sed -i "s/deb-src http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main contrib non-free/" /etc/apt/sources.list - sed -i "s/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main contrib non-free/" /etc/apt/sources.list - sed -i "s/deb-src http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main contrib non-free/" /etc/apt/sources.list - # Handle bullseye location for security https://www.debian.org/releases/bullseye/amd64/release-notes/ch-information.en.html - sed -i "s/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main contrib non-free/" /etc/apt/sources.list - sed -i "s/deb-src http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main contrib non-free/" /etc/apt/sources.list + if [[ ! 
-e "/etc/apt/sources.list" ]] && [[ -e "/etc/apt/sources.list.d/debian.sources" ]]; then + sed -i '/^URIs: http:\/\/deb.debian.org\/debian$/ { N; N; s/Components: main/Components: main non-free non-free-firmware/ }' /etc/apt/sources.list.d/debian.sources + else + # Bring in variables from /etc/os-release like VERSION_CODENAME + sed -i -E "s/deb http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME} main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME} main contrib non-free/" /etc/apt/sources.list + sed -i -E "s/deb-src http:\/\/(deb|httredir)\.debian\.org\/debian ${VERSION_CODENAME} main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME} main contrib non-free/" /etc/apt/sources.list + sed -i -E "s/deb http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME}-updates main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME}-updates main contrib non-free/" /etc/apt/sources.list + sed -i -E "s/deb-src http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME}-updates main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME}-updates main contrib non-free/" /etc/apt/sources.list + sed -i "s/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main contrib non-free/" /etc/apt/sources.list + sed -i "s/deb-src http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main contrib non-free/" /etc/apt/sources.list + sed -i "s/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main contrib non-free/" /etc/apt/sources.list + sed -i "s/deb-src http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main contrib non-free/" /etc/apt/sources.list + # 
Handle bullseye location for security https://www.debian.org/releases/bullseye/amd64/release-notes/ch-information.en.html + sed -i "s/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main contrib non-free/" /etc/apt/sources.list + sed -i "s/deb-src http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main contrib non-free/" /etc/apt/sources.list + fi; echo "Running apt-get update..." package_list="${package_list} manpages-posix manpages-posix-dev" fi @@ -154,14 +160,24 @@ install_debian_packages() { install_redhat_packages() { local package_list="" local remove_epel="false" - local install_cmd=dnf - if ! type dnf > /dev/null 2>&1; then - install_cmd=yum - fi + local install_cmd=microdnf + if type microdnf > /dev/null 2>&1; then + install_cmd=microdnf + elif type tdnf > /dev/null 2>&1; then + install_cmd=tdnf + elif type dnf > /dev/null 2>&1; then + install_cmd=dnf + elif type yum > /dev/null 2>&1; then + install_cmd=yum + else + echo "Unable to find 'tdnf', 'dnf', or 'yum' package manager. Exiting." 
+ exit 1 +fi if [ "${PACKAGES_ALREADY_INSTALLED}" != "true" ]; then package_list="${package_list} \ gawk \ + bash-completion \ openssh-clients \ gnupg2 \ iproute \ @@ -173,6 +189,7 @@ install_redhat_packages() { ca-certificates \ rsync \ unzip \ + xz \ zip \ nano \ vim-minimal \ @@ -189,11 +206,11 @@ install_redhat_packages() { man-db \ strace" - # rockylinux:9 installs 'curl-minimal' which clashes with 'curl' - # Install 'curl' for every OS except this rockylinux:9 - if [[ "${ID}" = "rocky" ]] && [[ "${VERSION}" != *"9."* ]]; then - package_list="${package_list} curl" - fi + # rockylinux:9 installs 'curl-minimal' which clashes with 'curl' + # Install 'curl' for every OS except this rockylinux:9 + if [[ "${ID}" = "rocky" ]] && [[ "${VERSION}" != *"9."* ]]; then + package_list="${package_list} curl" + fi # Install OpenSSL 1.0 compat if needed if ${install_cmd} -q list compat-openssl10 >/dev/null 2>&1; then @@ -222,7 +239,9 @@ install_redhat_packages() { package_list="${package_list} zsh" fi - ${install_cmd} -y install ${package_list} + if [ -n "${package_list}" ]; then + ${install_cmd} -y install ${package_list} + fi # Get to latest versions of all packages if [ "${UPGRADE_PACKAGES}" = "true" ]; then @@ -243,6 +262,7 @@ install_alpine_packages() { if [ "${PACKAGES_ALREADY_INSTALLED}" != "true" ]; then apk add --no-cache \ openssh-client \ + bash-completion \ gnupg \ procps \ lsof \ @@ -254,6 +274,7 @@ install_alpine_packages() { rsync \ ca-certificates \ unzip \ + xz \ zip \ nano \ vim \ @@ -263,7 +284,6 @@ install_alpine_packages() { libstdc++ \ krb5-libs \ libintl \ - libssl1.1 \ lttng-ust \ tzdata \ userspace-rcu \ @@ -277,6 +297,12 @@ install_alpine_packages() { shadow \ strace + # # Include libssl1.1 if available (not available for 3.19 and newer) + LIBSSL1_PKG=libssl1.1 + if [[ $(apk search --no-cache -a $LIBSSL1_PKG | grep $LIBSSL1_PKG) ]]; then + apk add --no-cache $LIBSSL1_PKG + fi + # Install man pages - package name varies between 3.12 and earlier 
versions if apk info man > /dev/null 2>&1; then apk add --no-cache man man-pages @@ -324,8 +350,9 @@ chmod +x /etc/profile.d/00-restore-env.sh # Get an adjusted ID independent of distro variants if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then ADJUSTED_ID="debian" -elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then +elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "azurelinux" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then ADJUSTED_ID="rhel" + VERSION_CODENAME="${ID}${VERSION_ID}" elif [ "${ID}" = "alpine" ]; then ADJUSTED_ID="alpine" else @@ -333,6 +360,22 @@ else exit 1 fi +if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then + # As of 1 July 2024, mirrorlist.centos.org no longer exists. + # Update the repo files to reference vault.centos.org. + sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo + sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo + sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo +fi + +if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then + # As of 1 July 2024, mirrorlist.centos.org no longer exists. + # Update the repo files to reference vault.centos.org. 
+ sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo + sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo + sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo +fi + # Install packages for appropriate OS case "${ADJUSTED_ID}" in "debian") @@ -407,6 +450,9 @@ fi if [ "${USERNAME}" = "root" ]; then user_home="/root" +# Check if user already has a home directory other than /home/${USERNAME} +elif [ "/home/${USERNAME}" != $( getent passwd $USERNAME | cut -d: -f6 ) ]; then + user_home=$( getent passwd $USERNAME | cut -d: -f6 ) else user_home="/home/${USERNAME}" if [ ! -d "${user_home}" ]; then @@ -418,6 +464,7 @@ fi # Restore user .bashrc / .profile / .zshrc defaults from skeleton file if it doesn't exist or is empty possible_rc_files=( ".bashrc" ".profile" ) [ "$INSTALL_OH_MY_ZSH_CONFIG" == "true" ] && possible_rc_files+=('.zshrc') +[ "$INSTALL_ZSH" == "true" ] && possible_rc_files+=('.zprofile') for rc_file in "${possible_rc_files[@]}"; do if [ -f "/etc/skel/${rc_file}" ]; then if [ ! -e "${user_home}/${rc_file}" ] || [ ! -s "${user_home}/${rc_file}" ]; then @@ -453,13 +500,19 @@ fi # Optionally configure zsh and Oh My Zsh! if [ "${INSTALL_ZSH}" = "true" ]; then + if [ ! 
-f "${user_home}/.zprofile" ]; then + touch "${user_home}/.zprofile" + echo 'source $HOME/.profile' >> "${user_home}/.zprofile" # TODO: Reconsider adding '.profile' to '.zprofile' + chown ${USERNAME}:${group_name} "${user_home}/.zprofile" + fi + if [ "${ZSH_ALREADY_INSTALLED}" != "true" ]; then if [ "${ADJUSTED_ID}" = "rhel" ]; then global_rc_path="/etc/zshrc" else global_rc_path="/etc/zsh/zshrc" fi - cat "${FEATURE_DIR}/scripts/rc_snippet.sh" >> /etc/zshrc + cat "${FEATURE_DIR}/scripts/rc_snippet.sh" >> ${global_rc_path} ZSH_ALREADY_INSTALLED="true" fi @@ -504,7 +557,9 @@ if [ "${INSTALL_ZSH}" = "true" ]; then # Add devcontainer .zshrc template if [ "$INSTALL_OH_MY_ZSH_CONFIG" = "true" ]; then - echo -e "$(cat "${template_path}")\nDISABLE_AUTO_UPDATE=true\nDISABLE_UPDATE_PROMPT=true" > ${user_rc_file} + if ! [ -f "${template_path}" ] || ! grep -qF "$(head -n 1 "${template_path}")" "${user_rc_file}"; then + echo -e "$(cat "${template_path}")\nzstyle ':omz:update' mode disabled" > ${user_rc_file} + fi sed -i -e 's/ZSH_THEME=.*/ZSH_THEME="devcontainers"/g' ${user_rc_file} fi @@ -537,7 +592,7 @@ chmod +rx /usr/local/bin/code # systemctl shim for Debian/Ubuntu - tells people to use 'service' if systemd is not running if [ "${ADJUSTED_ID}" = "debian" ]; then - cp -f "${FEATURE_DIR}/bin/systemctl" /usr/local/bin/systemctl + cp -fL "${FEATURE_DIR}/bin/systemctl" /usr/local/bin/systemctl chmod +rx /usr/local/bin/systemctl fi diff --git a/src/common-utils/scripts/bash_theme_snippet.sh b/src/common-utils/scripts/bash_theme_snippet.sh index a028e4b63..ab76ada33 100644 --- a/src/common-utils/scripts/bash_theme_snippet.sh +++ b/src/common-utils/scripts/bash_theme_snippet.sh @@ -1,14 +1,13 @@ - # bash theme - partly inspired by https://github.com/ohmyzsh/ohmyzsh/blob/master/themes/robbyrussell.zsh-theme __bash_prompt() { local userpart='`export XIT=$? \ - && [ ! -z "${GITHUB_USER}" ] && echo -n "\[\033[0;32m\]@${GITHUB_USER} " || echo -n "\[\033[0;32m\]\u " \ + && [ ! 
-z "${GITHUB_USER:-}" ] && echo -n "\[\033[0;32m\]@${GITHUB_USER:-} " || echo -n "\[\033[0;32m\]\u " \ && [ "$XIT" -ne "0" ] && echo -n "\[\033[1;31m\]➜" || echo -n "\[\033[0m\]➜"`' local gitbranch='`\ if [ "$(git config --get devcontainers-theme.hide-status 2>/dev/null)" != 1 ] && [ "$(git config --get codespaces-theme.hide-status 2>/dev/null)" != 1 ]; then \ - export BRANCH=$(git --no-optional-locks symbolic-ref --short HEAD 2>/dev/null || git --no-optional-locks rev-parse --short HEAD 2>/dev/null); \ - if [ "${BRANCH}" != "" ]; then \ - echo -n "\[\033[0;36m\](\[\033[1;31m\]${BRANCH}" \ + export BRANCH="$(git --no-optional-locks symbolic-ref --short HEAD 2>/dev/null || git --no-optional-locks rev-parse --short HEAD 2>/dev/null)"; \ + if [ "${BRANCH:-}" != "" ]; then \ + echo -n "\[\033[0;36m\](\[\033[1;31m\]${BRANCH:-}" \ && if [ "$(git config --get devcontainers-theme.show-dirty 2>/dev/null)" = 1 ] && \ git --no-optional-locks ls-files --error-unmatch -m --directory --no-empty-directory -o --exclude-standard ":/*" > /dev/null 2>&1; then \ echo -n " \[\033[1;33m\]✗"; \ @@ -23,3 +22,23 @@ __bash_prompt() { } __bash_prompt export PROMPT_DIRTRIM=4 + +# Check if the terminal is xterm +if [[ "$TERM" == "xterm" ]]; then + # Function to set the terminal title to the current command + preexec() { + local cmd="${BASH_COMMAND}" + echo -ne "\033]0;${USER}@${HOSTNAME}: ${cmd}\007" + } + + # Function to reset the terminal title to the shell type after the command is executed + precmd() { + echo -ne "\033]0;${USER}@${HOSTNAME}: ${SHELL}\007" + } + + # Trap DEBUG signal to call preexec before each command + trap 'preexec' DEBUG + + # Append to PROMPT_COMMAND to call precmd before displaying the prompt + PROMPT_COMMAND="${PROMPT_COMMAND:+$PROMPT_COMMAND; }precmd" +fi diff --git a/src/common-utils/scripts/devcontainers.zsh-theme b/src/common-utils/scripts/devcontainers.zsh-theme index ff11c917e..0cfd70e65 100644 --- a/src/common-utils/scripts/devcontainers.zsh-theme +++ 
b/src/common-utils/scripts/devcontainers.zsh-theme @@ -1,7 +1,7 @@ # Oh My Zsh! theme - partly inspired by https://github.com/ohmyzsh/ohmyzsh/blob/master/themes/robbyrussell.zsh-theme __zsh_prompt() { local prompt_username - if [ ! -z "${GITHUB_USER}" ]; then + if [ ! -z "${GITHUB_USER}" ]; then prompt_username="@${GITHUB_USER}" else prompt_username="%n" @@ -24,3 +24,22 @@ __zsh_prompt() { unset -f __zsh_prompt } __zsh_prompt + +# Check if the terminal is xterm +if [[ "$TERM" == "xterm" ]]; then + # Function to set the terminal title to the current command + preexec() { + local cmd=${1} + echo -ne "\033]0;${USER}@${HOSTNAME}: ${cmd}\007" + } + + # Function to reset the terminal title to the shell type after the command is executed + precmd() { + echo -ne "\033]0;${USER}@${HOSTNAME}: ${SHELL}\007" + } + + # Add the preexec and precmd functions to the corresponding hooks + autoload -Uz add-zsh-hook + add-zsh-hook preexec preexec + add-zsh-hook precmd precmd +fi diff --git a/src/common-utils/scripts/rc_snippet.sh b/src/common-utils/scripts/rc_snippet.sh index 4810cd993..f3f36a4b8 100644 --- a/src/common-utils/scripts/rc_snippet.sh +++ b/src/common-utils/scripts/rc_snippet.sh @@ -1,4 +1,3 @@ - if [ -z "${USER}" ]; then export USER=$(whoami); fi if [[ "${PATH}" != *"$HOME/.local/bin"* ]]; then export PATH="${PATH}:$HOME/.local/bin"; fi @@ -17,9 +16,9 @@ fi # Set the default git editor if not already set if [ -z "$(git config --get core.editor)" ] && [ -z "${GIT_EDITOR}" ]; then if [ "${TERM_PROGRAM}" = "vscode" ]; then - if [[ -n $(command -v code-insiders) && -z $(command -v code) ]]; then + if [[ -n $(command -v code-insiders) && -z $(command -v code) ]]; then export GIT_EDITOR="code-insiders --wait" - else + else export GIT_EDITOR="code --wait" fi fi diff --git a/src/conda/devcontainer-feature.json b/src/conda/devcontainer-feature.json index 9a2365253..163696a20 100644 --- a/src/conda/devcontainer-feature.json +++ b/src/conda/devcontainer-feature.json @@ -1,6 +1,6 @@ 
{ "id": "conda", - "version": "1.0.9", + "version": "1.0.10", "name": "Conda", "description": "A cross-platform, language-agnostic binary package manager", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/conda", @@ -26,6 +26,17 @@ "CONDA_SCRIPT":"/opt/conda/etc/profile.d/conda.sh", "PATH": "/opt/conda/bin:${PATH}" }, + "customizations": { + "vscode": { + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes the conda package manager pre-installed and available on the `PATH` for data science and Python development. Additional packages installed using Conda will be downloaded from Anaconda or another repository configured by the user. A user can install different versions of Python than the one in this dev container by running a command like: conda install python=3.7" + } + ] + } + } + }, "installsAfter": [ "ghcr.io/devcontainers/features/common-utils" ] diff --git a/src/desktop-lite/NOTES.md b/src/desktop-lite/NOTES.md index 924ce9b6e..ec9ec71d4 100644 --- a/src/desktop-lite/NOTES.md +++ b/src/desktop-lite/NOTES.md @@ -19,7 +19,7 @@ To set up the `6080` port from your `devcontainer.json` file, include the follow You can also connect to the desktop using a [VNC viewer](https://www.realvnc.com/en/connect/download/viewer/). To do so: 1. Connect to the environment from a desktop tool that supports the dev container spec (e.g., VS Code client). -1. Forward the VNC server port (`5901` by default) to your local machine using either the `forwardPorts` property in `devcontainer.json` or the user interface in your tool (e.g., you can press F1 or Ctrl/Cmd+Shift+P and select **Ports: Focus on Ports View** in VS Code to bring it into focus). +1. 
Forward the VNC server port (`5901` by default) to your local machine using either the `forwardPorts` property in `devcontainer.json` or the user interface in your tool (e.g., you can press F1 or Ctrl/Cmd+Shift+P and select **Ports: Focus on Ports View** in VS Code to bring it into focus). If you are using the [Dev Container CLI](https://github.com/devcontainers/cli), you should instead use the `appPort` property in `devcontainer.json`. 1. Start your VNC Viewer and connect to localhost:5901. Note that you may need to bump up the color depth to 24 bits to see full color. 1. Enter the desktop password (`vscode` by default). diff --git a/src/desktop-lite/README.md b/src/desktop-lite/README.md index 7094a2425..a70997405 100644 --- a/src/desktop-lite/README.md +++ b/src/desktop-lite/README.md @@ -16,10 +16,10 @@ Adds a lightweight Fluxbox based desktop to the container that can be accessed u | Options Id | Description | Type | Default Value | |-----|-----|-----|-----| | version | Currently Unused! | string | latest | -| noVncVersion | NoVnc Version | string | 1.2.0 | -| password | Enter a password for desktop connections | string | vscode | -| webPort | Enter a port for the VNC web client | string | 6080 | -| vncPort | Enter a port for the desktop VNC server | string | 5901 | +| noVncVersion | The noVNC version to use | string | 1.2.0 | +| password | Enter a password for desktop connections. If "noPassword", connections from the local host can be established without entering a password | string | vscode | +| webPort | Enter a port for the VNC web client (noVNC) | string | 6080 | +| vncPort | Enter a port for the desktop VNC server (TigerVNC) | string | 5901 | ## Connecting to the desktop @@ -42,7 +42,7 @@ To set up the `6080` port from your `devcontainer.json` file, include the follow You can also connect to the desktop using a [VNC viewer](https://www.realvnc.com/en/connect/download/viewer/). To do so: 1. 
Connect to the environment from a desktop tool that supports the dev container spec (e.g., VS Code client). -1. Forward the VNC server port (`5901` by default) to your local machine using either the `forwardPorts` property in `devcontainer.json` or the user interface in your tool (e.g., you can press F1 or Ctrl/Cmd+Shift+P and select **Ports: Focus on Ports View** in VS Code to bring it into focus). +1. Forward the VNC server port (`5901` by default) to your local machine using either the `forwardPorts` property in `devcontainer.json` or the user interface in your tool (e.g., you can press F1 or Ctrl/Cmd+Shift+P and select **Ports: Focus on Ports View** in VS Code to bring it into focus). If you are using the [Dev Container CLI](https://github.com/devcontainers/cli), you should instead use the `appPort` property in `devcontainer.json`. 1. Start your VNC Viewer and connect to localhost:5901. Note that you may need to bump up the color depth to 24 bits to see full color. 1. Enter the desktop password (`vscode` by default). diff --git a/src/desktop-lite/devcontainer-feature.json b/src/desktop-lite/devcontainer-feature.json index 5387138b5..ae10c9977 100644 --- a/src/desktop-lite/devcontainer-feature.json +++ b/src/desktop-lite/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "desktop-lite", - "version": "1.0.8", + "version": "1.2.8", "name": "Light-weight Desktop", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/desktop-lite", "description": "Adds a lightweight Fluxbox based desktop to the container that can be accessed using a VNC viewer or the web. 
GUI-based commands executed from the built-in VS code terminal will open on the desktop automatically.", @@ -16,20 +16,21 @@ "noVncVersion": { "type": "string", "proposals": [ - "1.2.0" + "1.6.0" ], - "default": "1.2.0", - "description": "NoVnc Version" + "default": "1.6.0", + "description": "The noVNC version to use" }, "password": { "type": "string", "proposals": [ "vscode", "codespaces", - "password" + "password", + "noPassword" ], "default": "vscode", - "description": "Enter a password for desktop connections" + "description": "Enter a password for desktop connections. If \"noPassword\", connections from the local host can be established without entering a password" }, "webPort": { "type": "string", @@ -37,7 +38,7 @@ "6080" ], "default": "6080", - "description": "Enter a port for the VNC web client" + "description": "Enter a port for the VNC web client (noVNC)" }, "vncPort": { "type": "string", @@ -45,7 +46,7 @@ "5901" ], "default": "5901", - "description": "Enter a port for the desktop VNC server" + "description": "Enter a port for the desktop VNC server (TigerVNC)" } }, "init": true, @@ -53,6 +54,17 @@ "containerEnv": { "DISPLAY": ":1" }, + "customizations": { + "vscode": { + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes a lightweight Fluxbox based desktop that can be accessed using a VNC viewer or the web. GUI-based commands executed from the built-in VS Code terminal will open on the desktop automatically." 
+ } + ] + } + } + }, "installsAfter": [ "ghcr.io/devcontainers/features/common-utils" ] diff --git a/src/desktop-lite/install.sh b/src/desktop-lite/install.sh index df4390eca..4575cc4f9 100755 --- a/src/desktop-lite/install.sh +++ b/src/desktop-lite/install.sh @@ -7,8 +7,11 @@ # Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/desktop-lite.md # Maintainer: The VS Code and Codespaces Teams -NOVNC_VERSION="${NOVNCVERSION:-"1.2.0"}" # TODO: Add in a 'latest' auto-detect and swap name to 'version' +NOVNC_VERSION="${NOVNCVERSION:-"1.6.0"}" # TODO: Add in a 'latest' auto-detect and swap name to 'version' VNC_PASSWORD=${PASSWORD:-"vscode"} +if [ "$VNC_PASSWORD" = "noPassword" ]; then + unset VNC_PASSWORD +fi NOVNC_PORT="${WEBPORT:-6080}" VNC_PORT="${VNCPORT:-5901}" @@ -28,7 +31,6 @@ package_list=" fbautostart \ at-spi2-core \ xterm \ - eterm \ nautilus\ mousepad \ seahorse \ @@ -41,7 +43,6 @@ package_list=" libnotify4 \ libnss3 \ libxss1 \ - libasound2 \ xfonts-base \ xfonts-terminus \ fonts-noto \ @@ -198,6 +199,16 @@ fi # Install X11, fluxbox and VS Code dependencies check_packages ${package_list} +# if Ubuntu-24.04, noble(numbat) / Debian-13, trixie found, then will install libasound2-dev instead of libasound2. +# this change is temporary, https://packages.ubuntu.com/noble/libasound2 will switch to libasound2 once it is available for Ubuntu-24.04, noble(numbat) +. /etc/os-release +if { [ "${ID}" = "ubuntu" ] && [ "${VERSION_CODENAME}" = "noble" ]; } || { [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; }; then + echo "Detected Noble (Ubuntu 24.04) or Trixie (Debian). Installing libasound2-dev package..." + check_packages "libasound2-dev" +else + check_packages "libasound2" +fi + # On newer versions of Ubuntu (22.04), # we need an additional package that isn't provided in earlier versions if ! 
type vncpasswd > /dev/null 2>&1; then @@ -288,11 +299,11 @@ user_name="${USERNAME}" group_name="$(id -gn ${USERNAME})" LOG=/tmp/container-init.log -export DBUS_SESSION_BUS_ADDRESS="${DBUS_SESSION_BUS_ADDRESS:-"autolaunch:"}" -export DISPLAY="${DISPLAY:-:1}" -export VNC_RESOLUTION="${VNC_RESOLUTION:-1440x768x16}" -export LANG="${LANG:-"en_US.UTF-8"}" -export LANGUAGE="${LANGUAGE:-"en_US.UTF-8"}" +export DBUS_SESSION_BUS_ADDRESS="\${DBUS_SESSION_BUS_ADDRESS:-"autolaunch:"}" +export DISPLAY="\${DISPLAY:-:1}" +export VNC_RESOLUTION="\${VNC_RESOLUTION:-1440x768x16}" +export LANG="\${LANG:-"en_US.UTF-8"}" +export LANGUAGE="\${LANGUAGE:-"en_US.UTF-8"}" # Execute the command it not already running startInBackgroundIfNotRunning() @@ -343,6 +354,13 @@ log() echo -e "[\$(date)] \$@" | sudoIf tee -a \$LOG > /dev/null } +# Function to compare versions +version_gt() { + # returns 0 if \$1 > \$2 + [ "\$(printf '%s\n' "\$2" "\$1" | sort -V | head -n1)" != "\$1" ] +} + + log "** SCRIPT START **" # Start dbus. 
@@ -363,23 +381,46 @@ sudoIf chown root:\${group_name} /tmp/.X11-unix if [ "\$(echo "\${VNC_RESOLUTION}" | tr -cd 'x' | wc -c)" = "1" ]; then VNC_RESOLUTION=\${VNC_RESOLUTION}x16; fi screen_geometry="\${VNC_RESOLUTION%*x*}" screen_depth="\${VNC_RESOLUTION##*x}" -startInBackgroundIfNotRunning "Xtigervnc" sudoUserIf "tigervncserver \${DISPLAY} -geometry \${screen_geometry} -depth \${screen_depth} -rfbport ${VNC_PORT} -dpi \${VNC_DPI:-96} -localhost -desktop fluxbox -fg -passwd /usr/local/etc/vscode-dev-containers/vnc-passwd" + +# Check if VNC_PASSWORD is set and use the appropriate command +common_options="tigervncserver \${DISPLAY} -geometry \${screen_geometry} -depth \${screen_depth} -rfbport ${VNC_PORT} -dpi \${VNC_DPI:-96} -localhost -desktop fluxbox -fg" + +if [ -n "\${VNC_PASSWORD+x}" ]; then + startInBackgroundIfNotRunning "Xtigervnc" sudoUserIf "\${common_options} -passwd /usr/local/etc/vscode-dev-containers/vnc-passwd" +else + startInBackgroundIfNotRunning "Xtigervnc" sudoUserIf "\${common_options} -SecurityTypes None" +fi # Spin up noVNC if installed and not running. -if [ -d "/usr/local/novnc" ] && [ "\$(ps -ef | grep /usr/local/novnc/noVNC*/utils/launch.sh | grep -v grep)" = "" ]; then - keepRunningInBackground "noVNC" sudoIf "/usr/local/novnc/noVNC*/utils/launch.sh --listen ${NOVNC_PORT} --vnc localhost:${VNC_PORT}" - log "noVNC started." +if [ -d "/usr/local/novnc" ]; then + if [ "\$(ps -ef | grep /usr/local/novnc/noVNC*/utils/launch.sh | grep -v grep)" = "" ] && [ "\$(ps -ef | grep /usr/local/novnc/noVNC*/utils/novnc_proxy | grep -v grep)" = "" ]; then + if version_gt "${NOVNC_VERSION}" "1.2.0"; then + keepRunningInBackground "noVNC" sudoIf "/usr/local/novnc/noVNC*/utils/novnc_proxy --listen ${NOVNC_PORT} --vnc localhost:${VNC_PORT}" + log "noVNC started with novnc_proxy." + else + keepRunningInBackground "noVNC" sudoIf "/usr/local/novnc/noVNC*/utils/launch.sh --listen ${NOVNC_PORT} --vnc localhost:${VNC_PORT}" + log "noVNC started with launch.sh." 
+ fi + else + log "noVNC is already running." + fi else - log "noVNC is already running or not installed." + log "noVNC is not installed." fi # Run whatever was passed in -log "Executing \"\$@\"." -exec "\$@" +if [ -n "\$1" ]; then + log "Executing \"\$@\"." + exec "\$@" +else + log "No command provided to execute." +fi log "** SCRIPT EXIT **" EOF -echo "${VNC_PASSWORD}" | vncpasswd -f > /usr/local/etc/vscode-dev-containers/vnc-passwd +if [ -n "${VNC_PASSWORD+x}" ]; then + echo "${VNC_PASSWORD}" | vncpasswd -f > /usr/local/etc/vscode-dev-containers/vnc-passwd +fi chmod +x /usr/local/share/desktop-init.sh /usr/local/bin/set-resolution # Set up fluxbox config @@ -392,15 +433,23 @@ fi # Clean up rm -rf /var/lib/apt/lists/* +# Determine the message based on whether VNC_PASSWORD is set +if [ -n "${VNC_PASSWORD+x}" ]; then + PASSWORD_MESSAGE="In both cases, use the password \"${VNC_PASSWORD}\" when connecting" +else + PASSWORD_MESSAGE="In both cases, no password is required." +fi + +# Display the message cat << EOF You now have a working desktop! Connect to in one of the following ways: -- Forward port ${NOVNC_PORT} and use a web browser start the noVNC client (recommended) +- Forward port ${NOVNC_PORT} and use a web browser to start the noVNC client (recommended) - Forward port ${VNC_PORT} using VS Code client and connect using a VNC Viewer -In both cases, use the password "${VNC_PASSWORD}" when connecting +${PASSWORD_MESSAGE} (*) Done! diff --git a/src/docker-in-docker/NOTES.md b/src/docker-in-docker/NOTES.md index b8156f8b6..c7fb26137 100644 --- a/src/docker-in-docker/NOTES.md +++ b/src/docker-in-docker/NOTES.md @@ -13,4 +13,6 @@ This docker-in-docker Dev Container Feature is roughly based on the [official do This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. +Debian Trixie (13) does not include moby-cli and related system packages, so the feature cannot install with "moby": "true". 
To use this feature on Trixie, please set "moby": "false" or choose a different base image (for example, Ubuntu 24.04). + `bash` is required to execute the `install.sh` script. diff --git a/src/docker-in-docker/README.md b/src/docker-in-docker/README.md index bfca77357..3fdbcc312 100644 --- a/src/docker-in-docker/README.md +++ b/src/docker-in-docker/README.md @@ -17,16 +17,19 @@ Create child containers *inside* a container, independent from the host's docker |-----|-----|-----|-----| | version | Select or enter a Docker/Moby Engine version. (Availability can vary by OS version.) | string | latest | | moby | Install OSS Moby build instead of Docker CE | boolean | true | -| dockerDashComposeVersion | Default version of Docker Compose (v1 or v2 or none) | string | v1 | +| mobyBuildxVersion | Install a specific version of moby-buildx when using Moby | string | latest | +| dockerDashComposeVersion | Default version of Docker Compose (v1, v2 or none) | string | v2 | | azureDnsAutoDetection | Allow automatically setting the dockerd DNS server when the installation script detects it is running in Azure | boolean | true | | dockerDefaultAddressPool | Define default address pools for Docker networks. e.g. base=192.168.0.0/16,size=24 | string | - | | installDockerBuildx | Install Docker Buildx | boolean | true | +| installDockerComposeSwitch | Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter. 
| boolean | true | +| disableIp6tables | Disable ip6tables (this option is only applicable for Docker versions 27 and greater) | boolean | false | ## Customizations ### VS Code Extensions -- `ms-azuretools.vscode-docker` +- `ms-azuretools.vscode-containers` ## Limitations diff --git a/src/docker-in-docker/devcontainer-feature.json b/src/docker-in-docker/devcontainer-feature.json index 2feea7d99..0450c664e 100644 --- a/src/docker-in-docker/devcontainer-feature.json +++ b/src/docker-in-docker/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "docker-in-docker", - "version": "2.5.0", + "version": "2.12.4", "name": "Docker (Docker-in-Docker)", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/docker-in-docker", "description": "Create child containers *inside* a container, independent from the host's docker instance. Installs Docker extension in the container along with needed CLIs.", @@ -20,6 +20,11 @@ "default": true, "description": "Install OSS Moby build instead of Docker CE" }, + "mobyBuildxVersion": { + "type": "string", + "default": "latest", + "description": "Install a specific version of moby-buildx when using Moby" + }, "dockerDashComposeVersion": { "type": "string", "enum": [ @@ -27,8 +32,8 @@ "v1", "v2" ], - "default": "v1", - "description": "Default version of Docker Compose (v1 or v2 or none)" + "default": "v2", + "description": "Default version of Docker Compose (v1, v2 or none)" }, "azureDnsAutoDetection": { "type": "boolean", @@ -45,6 +50,16 @@ "type": "boolean", "default": true, "description": "Install Docker Buildx" + }, + "installDockerComposeSwitch": { + "type": "boolean", + "default": true, + "description": "Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter." 
+ }, + "disableIp6tables": { + "type": "boolean", + "default": false, + "description": "Disable ip6tables (this option is only applicable for Docker versions 27 and greater)" } }, "entrypoint": "/usr/local/share/docker-init.sh", @@ -55,8 +70,15 @@ "customizations": { "vscode": { "extensions": [ - "ms-azuretools.vscode-docker" - ] + "ms-azuretools.vscode-containers" + ], + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using a dedicated Docker daemon running inside the dev container." + } + ] + } } }, "mounts": [ diff --git a/src/docker-in-docker/install.sh b/src/docker-in-docker/install.sh index 39d1b77ca..46f1045ae 100755 --- a/src/docker-in-docker/install.sh +++ b/src/docker-in-docker/install.sh @@ -10,14 +10,18 @@ DOCKER_VERSION="${VERSION:-"latest"}" # The Docker/Moby Engine + CLI should match in version USE_MOBY="${MOBY:-"true"}" -DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v1"}" # v1 or v2 or none +MOBY_BUILDX_VERSION="${MOBYBUILDXVERSION:-"latest"}" +DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v2"}" #v1, v2 or none AZURE_DNS_AUTO_DETECTION="${AZUREDNSAUTODETECTION:-"true"}" -DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL}" +DOCKER_DEFAULT_ADDRESS_POOL="${DOCKERDEFAULTADDRESSPOOL:-""}" USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}" INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}" +INSTALL_DOCKER_COMPOSE_SWITCH="${INSTALLDOCKERCOMPOSESWITCH:-"true"}" MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc" -DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="bookworm buster bullseye bionic focal jammy" -DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="bookworm buster bullseye bionic focal hirsute impish jammy" +MICROSOFT_GPG_KEYS_ROLLING_URI="https://packages.microsoft.com/keys/microsoft-rolling.asc" 
+DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal jammy noble" +DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal hirsute impish jammy noble" +DISABLE_IP6_TABLES="${DISABLEIP6TABLES:-false}" # Default: Exit on any failure. set -e @@ -57,21 +61,6 @@ elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then USERNAME=root fi -# Get central common setting -get_common_setting() { - if [ "${common_settings_file_loaded}" != "true" ]; then - curl -sfL "https://aka.ms/vscode-dev-containers/script-library/settings.env" 2>/dev/null -o /tmp/vsdc-settings.env || echo "Could not download settings file. Skipping." - common_settings_file_loaded=true - fi - if [ -f "/tmp/vsdc-settings.env" ]; then - local multi_line="" - if [ "$2" = "true" ]; then multi_line="-z"; fi - local result="$(grep ${multi_line} -oP "$1=\"?\K[^\"]+" /tmp/vsdc-settings.env | tr -d '\0')" - if [ ! -z "${result}" ]; then declare -g $1="${result}"; fi - fi - echo "$1=${!1}" -} - apt_get_update() { if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then @@ -96,7 +85,7 @@ find_version_from_git_tags() { local repository=$2 local prefix=${3:-"tags/v"} local separator=${4:-"."} - local last_part_optional=${5:-"false"} + local last_part_optional=${5:-"false"} if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then local escaped_separator=${separator//./\\.} local last_part @@ -122,6 +111,77 @@ find_version_from_git_tags() { echo "${variable_name}=${!variable_name}" } +# Use semver logic to decrement a version number then look for the closest match +find_prev_version_from_git_tags() { + local variable_name=$1 + local current_version=${!variable_name} + local repository=$2 + # Normally a "v" is used before the version number, but support alternate cases + local prefix=${3:-"tags/v"} + # Some repositories use "_" instead of "." 
for version number part separation, support that + local separator=${4:-"."} + # Some tools release versions that omit the last digit (e.g. go) + local last_part_optional=${5:-"false"} + # Some repositories may have tags that include a suffix (e.g. actions/node-versions) + local version_suffix_regex=$6 + # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios. + set +e + major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')" + minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')" + breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')" + + if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then + ((major=major-1)) + declare -g ${variable_name}="${major}" + # Look for latest version from previous major release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + # Handle situations like Go's odd version pattern where "0" releases omit the last part + elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then + ((minor=minor-1)) + declare -g ${variable_name}="${major}.${minor}" + # Look for latest version from previous minor release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + else + ((breakfix=breakfix-1)) + if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then + declare -g ${variable_name}="${major}.${minor}" + else + declare -g ${variable_name}="${major}.${minor}.${breakfix}" + fi + fi + set -e +} + +# Function to fetch the version released prior to the latest version +get_previous_version() { + local url=$1 + local repo_url=$2 + local variable_name=$3 + prev_version=${!variable_name} + + output=$(curl -s "$repo_url"); + if echo "$output" | jq -e 'type == "object"' > /dev/null; then + message=$(echo "$output" | jq -r '.message') + + if [[ $message == "API 
rate limit exceeded"* ]]; then + echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}" + echo -e "\nAttempting to find latest version using GitHub tags." + find_prev_version_from_git_tags prev_version "$url" "tags/v" + declare -g ${variable_name}="${prev_version}" + fi + elif echo "$output" | jq -e 'type == "array"' > /dev/null; then + echo -e "\nAttempting to find latest version using GitHub Api." + version=$(echo "$output" | jq -r '.[1].tag_name') + declare -g ${variable_name}="${version#v}" + fi + echo "${variable_name}=${!variable_name}" +} + +get_github_api_repo_url() { + local url=$1 + echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases" +} + ########################################### # Start docker-in-docker installation ########################################### @@ -135,28 +195,32 @@ export DEBIAN_FRONTEND=noninteractive # Fetch host/container arch. architecture="$(dpkg --print-architecture)" +# Prevent attempting to install Moby on Debian trixie (packages removed) +if [ "${USE_MOBY}" = "true" ] && [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; then + err "The 'moby' option is not supported on Debian 'trixie' because 'moby-cli' and related system packages have been removed from that distribution." + err "To continue, either set the feature option '\"moby\": false' or use a different base image (for example: 'debian:bookworm' or 'ubuntu-24.04')." + exit 1 +fi + # Check if distro is supported if [ "${USE_MOBY}" = "true" ]; then - # 'get_common_setting' allows attribute to be updated remotely - get_common_setting DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then err "Unsupported distribution version '${VERSION_CODENAME}'. 
To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution" - err "Support distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" + err "Supported distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" exit 1 fi echo "Distro codename '${VERSION_CODENAME}' matched filter '${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}'" else - get_common_setting DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution" - err "Support distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" + err "Supported distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" exit 1 fi echo "Distro codename '${VERSION_CODENAME}' matched filter '${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}'" fi # Install dependencies -check_packages apt-transport-https curl ca-certificates pigz iptables gnupg2 dirmngr wget +check_packages apt-transport-https curl ca-certificates pigz iptables gnupg2 dirmngr wget jq if ! 
type git > /dev/null 2>&1; then check_packages git fi @@ -177,8 +241,10 @@ if [ "${USE_MOBY}" = "true" ]; then cli_package_name="moby-cli" # Import key safely and import Microsoft apt repo - get_common_setting MICROSOFT_GPG_KEYS_URI - curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg + { + curl -sSL ${MICROSOFT_GPG_KEYS_URI} + curl -sSL ${MICROSOFT_GPG_KEYS_ROLLING_URI} + } | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list else # Name of licensed engine/cli @@ -217,6 +283,27 @@ else echo "cli_version_suffix ${cli_version_suffix}" fi +# Version matching for moby-buildx +if [ "${USE_MOBY}" = "true" ]; then + if [ "${MOBY_BUILDX_VERSION}" = "latest" ]; then + # Empty, meaning grab whatever "latest" is in apt repo + buildx_version_suffix="" + else + buildx_version_dot_escaped="${MOBY_BUILDX_VERSION//./\\.}" + buildx_version_dot_plus_escaped="${buildx_version_dot_escaped//+/\\+}" + buildx_version_regex="^(.+:)?${buildx_version_dot_plus_escaped}([\\.\\+ ~:-]|$)" + set +e + buildx_version_suffix="=$(apt-cache madison moby-buildx | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${buildx_version_regex}")" + set -e + if [ -z "${buildx_version_suffix}" ] || [ "${buildx_version_suffix}" = "=" ]; then + err "No full or partial moby-buildx version match found for \"${MOBY_BUILDX_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). 
Available versions:" + apt-cache madison moby-buildx | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+' + exit 1 + fi + echo "buildx_version_suffix ${buildx_version_suffix}" + fi +fi + # Install Docker / Moby CLI if not already installed if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then echo "Docker / Moby CLI and Engine already installed." @@ -224,92 +311,124 @@ else if [ "${USE_MOBY}" = "true" ]; then # Install engine set +e # Handle error gracefully - apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx moby-engine${engine_version_suffix} - if [ $? -ne 0 ]; then - err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-20.04')." - exit 1 - fi - set -e + apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx${buildx_version_suffix} moby-engine${engine_version_suffix} + exit_code=$? + set -e + + if [ ${exit_code} -ne 0 ]; then + err "Packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-24.04')." + exit 1 + fi # Install compose apt-get -y install --no-install-recommends moby-compose || err "Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping." else apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix} # Install compose + apt-mark hold docker-ce docker-ce-cli apt-get -y install --no-install-recommends docker-compose-plugin || echo "(*) Package docker-compose-plugin (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping." fi fi echo "Finished installing docker / moby!" 
+docker_home="/usr/libexec/docker" +cli_plugins_dir="${docker_home}/cli-plugins" + +# fallback for docker-compose +fallback_compose(){ + local url=$1 + local repo_url=$(get_github_api_repo_url "$url") + echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..." + get_previous_version "${url}" "${repo_url}" compose_version + echo -e "\nAttempting to install v${compose_version}" + curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} +} + # If 'docker-compose' command is to be included if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then - # Install Docker Compose if not already installed and is on a supported architecture - if type docker-compose > /dev/null 2>&1; then - echo "Docker Compose v1 already installed." - else - target_compose_arch="${architecture}" - if [ "${target_compose_arch}" = "amd64" ]; then - target_compose_arch="x86_64" - fi - if [ "${target_compose_arch}" != "x86_64" ]; then + case "${architecture}" in + amd64) target_compose_arch=x86_64 ;; + arm64) target_compose_arch=aarch64 ;; + *) + echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine." + exit 1 + esac + + docker_compose_path="/usr/local/bin/docker-compose" + if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then + err "The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk." + INSTALL_DOCKER_COMPOSE_SWITCH="false" + + if [ "${target_compose_arch}" = "x86_64" ]; then + echo "(*) Installing docker compose v1..." 
+ curl -fsSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64" -o ${docker_compose_path} + chmod +x ${docker_compose_path} + + # Download the SHA256 checksum + DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64.sha256" | awk '{print $1}')" + echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum + sha256sum -c docker-compose.sha256sum --ignore-missing + elif [ "${VERSION_CODENAME}" = "bookworm" ]; then + err "Docker compose v1 is unavailable for 'bookworm' on Arm64. Kindly switch to use v2" + exit 1 + else # Use pip to get a version that runs on this architecture check_packages python3-minimal python3-pip libffi-dev python3-venv - export PIPX_HOME=/usr/local/pipx - mkdir -p ${PIPX_HOME} - export PIPX_BIN_DIR=/usr/local/bin - export PYTHONUSERBASE=/tmp/pip-tmp - export PIP_CACHE_DIR=/tmp/pip-tmp/cache - pipx_bin=pipx - if ! type pipx > /dev/null 2>&1; then - pip3 install --disable-pip-version-check --no-cache-dir --user pipx - pipx_bin=/tmp/pip-tmp/bin/pipx - fi - - set +e - ${pipx_bin} install --pip-args '--no-cache-dir --force-reinstall' docker-compose - exit_code=$? - set -e + echo "(*) Installing docker compose v1 via pip..." + export PYTHONUSERBASE=/usr/local + pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation + fi + else + compose_version=${DOCKER_DASH_COMPOSE_VERSION#v} + docker_compose_url="https://github.com/docker/compose" + find_version_from_git_tags compose_version "$docker_compose_url" "tags/v" + echo "(*) Installing docker-compose ${compose_version}..." + curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || { + echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..." 
+ fallback_compose "$docker_compose_url" + } - if [ ${exit_code} -ne 0 ]; then - # Temporary: https://github.com/devcontainers/features/issues/616 - # See https://github.com/yaml/pyyaml/issues/601 - echo "(*) Failed to install docker-compose via pipx. Trying via pip3..." + chmod +x ${docker_compose_path} - export PYTHONUSERBASE=/usr/local - pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation - fi + # Download the SHA256 checksum + DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')" + echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum + sha256sum -c docker-compose.sha256sum --ignore-missing - rm -rf /tmp/pip-tmp - else - compose_v1_version="1" - find_version_from_git_tags compose_v1_version "https://github.com/docker/compose" "tags/" - echo "(*) Installing docker-compose ${compose_v1_version}..." - curl -fsSL "https://github.com/docker/compose/releases/download/${compose_v1_version}/docker-compose-Linux-x86_64" -o /usr/local/bin/docker-compose - chmod +x /usr/local/bin/docker-compose - fi + mkdir -p ${cli_plugins_dir} + cp ${docker_compose_path} ${cli_plugins_dir} fi +fi - # Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation - current_v1_compose_path="$(which docker-compose)" - target_v1_compose_path="$(dirname "${current_v1_compose_path}")/docker-compose-v1" - if ! type compose-switch > /dev/null 2>&1; then +# fallback method for compose-switch +fallback_compose-switch() { + local url=$1 + local repo_url=$(get_github_api_repo_url "$url") + echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..." 
+ get_previous_version "$url" "$repo_url" compose_switch_version + echo -e "\nAttempting to install v${compose_switch_version}" + curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${architecture}" -o /usr/local/bin/compose-switch +} + +# Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation +if [ "${INSTALL_DOCKER_COMPOSE_SWITCH}" = "true" ] && ! type compose-switch > /dev/null 2>&1; then + if type docker-compose > /dev/null 2>&1; then echo "(*) Installing compose-switch..." + current_compose_path="$(which docker-compose)" + target_compose_path="$(dirname "${current_compose_path}")/docker-compose-v1" compose_switch_version="latest" - find_version_from_git_tags compose_switch_version "https://github.com/docker/compose-switch" - curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${architecture}" -o /usr/local/bin/compose-switch + compose_switch_url="https://github.com/docker/compose-switch" + find_version_from_git_tags compose_switch_version "$compose_switch_url" + curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${architecture}" -o /usr/local/bin/compose-switch || fallback_compose-switch "$compose_switch_url" chmod +x /usr/local/bin/compose-switch # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11 - # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2) - mv "${current_v1_compose_path}" "${target_v1_compose_path}" - update-alternatives --install /usr/local/bin/docker-compose docker-compose /usr/local/bin/compose-switch 99 - update-alternatives --install /usr/local/bin/docker-compose docker-compose "${target_v1_compose_path}" 1 - fi - if [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then - update-alternatives --set docker-compose 
"${target_v1_compose_path}" + mv "${current_compose_path}" "${target_compose_path}" + update-alternatives --install ${docker_compose_path} docker-compose /usr/local/bin/compose-switch 99 + update-alternatives --install ${docker_compose_path} docker-compose "${target_compose_path}" 1 else - update-alternatives --set docker-compose /usr/local/bin/compose-switch + err "Skipping installation of compose-switch as docker compose is unavailable..." fi fi @@ -328,21 +447,54 @@ fi usermod -aG docker ${USERNAME} +# fallback for docker/buildx +fallback_buildx() { + local url=$1 + local repo_url=$(get_github_api_repo_url "$url") + echo -e "\n(!) Failed to fetch the latest artifacts for docker buildx v${buildx_version}..." + get_previous_version "$url" "$repo_url" buildx_version + buildx_file_name="buildx-v${buildx_version}.linux-${architecture}" + echo -e "\nAttempting to install v${buildx_version}" + wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} +} + if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then buildx_version="latest" - find_version_from_git_tags buildx_version "https://github.com/docker/buildx" "refs/tags/v" - + docker_buildx_url="https://github.com/docker/buildx" + find_version_from_git_tags buildx_version "$docker_buildx_url" "refs/tags/v" echo "(*) Installing buildx ${buildx_version}..." 
buildx_file_name="buildx-v${buildx_version}.linux-${architecture}" - cd /tmp && wget "https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name}" + + cd /tmp + wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} || fallback_buildx "$docker_buildx_url" + + docker_home="/usr/libexec/docker" + cli_plugins_dir="${docker_home}/cli-plugins" - mkdir -p ${_REMOTE_USER_HOME}/.docker/cli-plugins - mv ${buildx_file_name} ${_REMOTE_USER_HOME}/.docker/cli-plugins/docker-buildx - chmod +x ${_REMOTE_USER_HOME}/.docker/cli-plugins/docker-buildx + mkdir -p ${cli_plugins_dir} + mv ${buildx_file_name} ${cli_plugins_dir}/docker-buildx + chmod +x ${cli_plugins_dir}/docker-buildx + + chown -R "${USERNAME}:docker" "${docker_home}" + chmod -R g+r+w "${docker_home}" + find "${docker_home}" -type d -print0 | xargs -n 1 -0 chmod g+s +fi - chown -R "${USERNAME}:docker" "${_REMOTE_USER_HOME}/.docker" - chmod -R g+r+w "${_REMOTE_USER_HOME}/.docker" - find "${_REMOTE_USER_HOME}/.docker" -type d -print0 | xargs -n 1 -0 chmod g+s +DOCKER_DEFAULT_IP6_TABLES="" +if [ "$DISABLE_IP6_TABLES" == true ]; then + requested_version="" + # checking whether the version requested either is in semver format or just a number denoting the major version + # and, extracting the major version number out of the two scenarios + semver_regex="^(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)\.(0|[1-9][0-9]*)(-([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?(\+([0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*))?$" + if echo "$DOCKER_VERSION" | grep -Eq $semver_regex; then + requested_version=$(echo $DOCKER_VERSION | cut -d. -f1) + elif echo "$DOCKER_VERSION" | grep -Eq "^[1-9][0-9]*$"; then + requested_version=$DOCKER_VERSION + fi + if [ "$DOCKER_VERSION" = "latest" ] || [[ -n "$requested_version" && "$requested_version" -ge 27 ]] ; then + DOCKER_DEFAULT_IP6_TABLES="--ip6tables=false" + echo "(!) 
As requested, passing '${DOCKER_DEFAULT_IP6_TABLES}'" + fi fi tee /usr/local/share/docker-init.sh > /dev/null \ @@ -357,13 +509,13 @@ set -e AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} +DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES} EOF tee -a /usr/local/share/docker-init.sh > /dev/null \ << 'EOF' -dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} $(cat << 'INNEREOF' +dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAULT_ADDRESS_POOL=${DOCKER_DEFAULT_ADDRESS_POOL} DOCKER_DEFAULT_IP6_TABLES=${DOCKER_DEFAULT_IP6_TABLES} $(cat << 'INNEREOF' # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly - # ie: docker kill find /run /var/run -iname 'docker*.pid' -delete || : find /run /var/run -iname 'container*.pid' -delete || : @@ -415,7 +567,7 @@ dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAU retry_cgroup_nesting=`expr $retry_cgroup_nesting + 1` set -e - done + done # -- End: dind wrapper script -- @@ -440,15 +592,25 @@ dockerd_start="AZURE_DNS_AUTO_DETECTION=${AZURE_DNS_AUTO_DETECTION} DOCKER_DEFAU fi # Start docker/moby engine - ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL > /tmp/dockerd.log 2>&1 ) & + ( dockerd $CUSTOMDNS $DEFAULT_ADDRESS_POOL $DOCKER_DEFAULT_IP6_TABLES > /tmp/dockerd.log 2>&1 ) & INNEREOF )" +sudo_if() { + COMMAND="$*" + + if [ "$(id -u)" -ne 0 ]; then + sudo $COMMAND + else + $COMMAND + fi +} + retry_docker_start_count=0 docker_ok="false" until [ "${docker_ok}" = "true" ] || [ "${retry_docker_start_count}" -eq "5" ]; -do +do # Start using sudo if not invoked as root if [ "$(id -u)" -ne 0 ]; then sudo /bin/sh -c "${dockerd_start}" @@ -466,11 +628,15 @@ do retry_count=`expr $retry_count + 1` done - - if [ "${docker_ok}" != "true" ]; then + + if [ "${docker_ok}" != "true" ] && [ 
"${retry_docker_start_count}" != "4" ]; then echo "(*) Failed to start docker, retrying..." + set +e + sudo_if pkill dockerd + sudo_if pkill containerd + set -e fi - + retry_docker_start_count=`expr $retry_docker_start_count + 1` done diff --git a/src/docker-outside-of-docker/NOTES.md b/src/docker-outside-of-docker/NOTES.md index fede053ae..ca6f43114 100644 --- a/src/docker-outside-of-docker/NOTES.md +++ b/src/docker-outside-of-docker/NOTES.md @@ -36,7 +36,7 @@ services: - The defaults value `./` is added so that the `docker-compose.yaml` file can work when it is run outside of the container -### Change the workspace to `${localWorkspaceFolder}` +### 2. Change the workspace to `${localWorkspaceFolder}` - This is useful if we don't want to edit the `docker-compose.yaml` file @@ -58,4 +58,6 @@ services: This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. +Debian Trixie (13) does not include moby-cli and related system packages, so the feature cannot install with "moby": "true". To use this feature on Trixie, please set "moby": "false" or choose a different base image (for example, Ubuntu 24.04). + `bash` is required to execute the `install.sh` script. diff --git a/src/docker-outside-of-docker/README.md b/src/docker-outside-of-docker/README.md index 94c891564..1794d42e8 100644 --- a/src/docker-outside-of-docker/README.md +++ b/src/docker-outside-of-docker/README.md @@ -19,14 +19,16 @@ Re-use the host docker socket, adding the Docker CLI to a container. Feature inv |-----|-----|-----|-----| | version | Select or enter a Docker/Moby CLI version. (Availability can vary by OS version.) 
| string | latest | | moby | Install OSS Moby build instead of Docker CE | boolean | true | +| mobyBuildxVersion | Install a specific version of moby-buildx when using Moby | string | latest | | dockerDashComposeVersion | Compose version to use for docker-compose (v1 or v2 or none) | string | v2 | | installDockerBuildx | Install Docker Buildx | boolean | true | +| installDockerComposeSwitch | Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter. | boolean | true | ## Customizations ### VS Code Extensions -- `ms-azuretools.vscode-docker` +- `ms-azuretools.vscode-containers` ## Limitations @@ -66,7 +68,7 @@ services: - The defaults value `./` is added so that the `docker-compose.yaml` file can work when it is run outside of the container -### Change the workspace to `${localWorkspaceFolder}` +### 2. Change the workspace to `${localWorkspaceFolder}` - This is useful if we don't want to edit the `docker-compose.yaml` file diff --git a/src/docker-outside-of-docker/devcontainer-feature.json b/src/docker-outside-of-docker/devcontainer-feature.json index d3123fabe..7314fa83d 100644 --- a/src/docker-outside-of-docker/devcontainer-feature.json +++ b/src/docker-outside-of-docker/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "docker-outside-of-docker", - "version": "1.3.0", + "version": "1.6.5", "name": "Docker (docker-outside-of-docker)", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/docker-outside-of-docker", "description": "Re-use the host docker socket, adding the Docker CLI to a container. 
Feature invokes a script to enable using a forwarded Docker socket within a container to run Docker commands.", @@ -20,6 +20,11 @@ "default": true, "description": "Install OSS Moby build instead of Docker CE" }, + "mobyBuildxVersion": { + "type": "string", + "default": "latest", + "description": "Install a specific version of moby-buildx when using Moby" + }, "dockerDashComposeVersion": { "type": "string", "enum": [ @@ -34,14 +39,26 @@ "type": "boolean", "default": true, "description": "Install Docker Buildx" + }, + "installDockerComposeSwitch": { + "type": "boolean", + "default": true, + "description": "Install Compose Switch (provided docker compose is available) which is a replacement to the Compose V1 docker-compose (python) executable. It translates the command line into Compose V2 docker compose then runs the latter." } }, "entrypoint": "/usr/local/share/docker-init.sh", "customizations": { "vscode": { "extensions": [ - "ms-azuretools.vscode-docker" - ] + "ms-azuretools.vscode-containers" + ], + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes the Docker CLI (`docker`) pre-installed and available on the `PATH` for running and managing containers using the Docker daemon on the host machine." 
+ } + ] + } } }, "mounts": [ @@ -51,6 +68,9 @@ "type": "bind" } ], + "securityOpt": [ + "label=disable" + ], "installsAfter": [ "ghcr.io/devcontainers/features/common-utils" ], diff --git a/src/docker-outside-of-docker/install.sh b/src/docker-outside-of-docker/install.sh index 7bf2138b4..74fd63530 100755 --- a/src/docker-outside-of-docker/install.sh +++ b/src/docker-outside-of-docker/install.sh @@ -9,23 +9,30 @@ DOCKER_VERSION="${VERSION:-"latest"}" USE_MOBY="${MOBY:-"true"}" -DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v1"}" # v1 or v2 or none +MOBY_BUILDX_VERSION="${MOBYBUILDXVERSION:-"latest"}" +DOCKER_DASH_COMPOSE_VERSION="${DOCKERDASHCOMPOSEVERSION:-"v2"}" # v1 or v2 or none ENABLE_NONROOT_DOCKER="${ENABLE_NONROOT_DOCKER:-"true"}" SOURCE_SOCKET="${SOURCE_SOCKET:-"/var/run/docker-host.sock"}" TARGET_SOCKET="${TARGET_SOCKET:-"/var/run/docker.sock"}" USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}" INSTALL_DOCKER_BUILDX="${INSTALLDOCKERBUILDX:-"true"}" - +INSTALL_DOCKER_COMPOSE_SWITCH="${INSTALLDOCKERCOMPOSESWITCH:-"true"}" MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc" -DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="buster bullseye bionic focal jammy" -DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="buster bullseye bionic focal hirsute impish jammy" +MICROSOFT_GPG_KEYS_ROLLING_URI="https://packages.microsoft.com/keys/microsoft-rolling.asc" +DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal jammy noble plucky" +DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES="trixie bookworm buster bullseye bionic focal hirsute impish jammy noble plucky" set -e # Clean up rm -rf /var/lib/apt/lists/* +# Setup STDERR. +err() { + echo "(!) $*" >&2 +} + if [ "$(id -u)" -ne 0 ]; then echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' exit 1 @@ -48,21 +55,6 @@ elif [ "${USERNAME}" = "none" ] || ! 
id -u ${USERNAME} > /dev/null 2>&1; then USERNAME=root fi -# Get central common setting -get_common_setting() { - if [ "${common_settings_file_loaded}" != "true" ]; then - curl -sfL "https://aka.ms/vscode-dev-containers/script-library/settings.env" 2>/dev/null -o /tmp/vsdc-settings.env || echo "Could not download settings file. Skipping." - common_settings_file_loaded=true - fi - if [ -f "/tmp/vsdc-settings.env" ]; then - local multi_line="" - if [ "$2" = "true" ]; then multi_line="-z"; fi - local result="$(grep ${multi_line} -oP "$1=\"?\K[^\"]+" /tmp/vsdc-settings.env | tr -d '\0')" - if [ ! -z "${result}" ]; then declare -g $1="${result}"; fi - fi - echo "$1=${!1}" -} - apt_get_update() { if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then @@ -113,6 +105,88 @@ find_version_from_git_tags() { echo "${variable_name}=${!variable_name}" } +# Use semver logic to decrement a version number then look for the closest match +find_prev_version_from_git_tags() { + local variable_name=$1 + local current_version=${!variable_name} + local repository=$2 + # Normally a "v" is used before the version number, but support alternate cases + local prefix=${3:-"tags/v"} + # Some repositories use "_" instead of "." for version number part separation, support that + local separator=${4:-"."} + # Some tools release versions that omit the last digit (e.g. go) + local last_part_optional=${5:-"false"} + # Some repositories may have tags that include a suffix (e.g. actions/node-versions) + local version_suffix_regex=$6 + # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios. 
+ set +e + major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')" + minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')" + breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')" + + if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then + ((major=major-1)) + declare -g ${variable_name}="${major}" + # Look for latest version from previous major release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + # Handle situations like Go's odd version pattern where "0" releases omit the last part + elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then + ((minor=minor-1)) + declare -g ${variable_name}="${major}.${minor}" + # Look for latest version from previous minor release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + else + ((breakfix=breakfix-1)) + if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then + declare -g ${variable_name}="${major}.${minor}" + else + declare -g ${variable_name}="${major}.${minor}.${breakfix}" + fi + fi + set -e +} + +# Function to fetch the version released prior to the latest version +get_previous_version() { + local url=$1 + local repo_url=$2 + local variable_name=$3 + prev_version=${!variable_name} + + output=$(curl -s "$repo_url"); + + check_packages jq + + if echo "$output" | jq -e 'type == "object"' > /dev/null; then + message=$(echo "$output" | jq -r '.message') + if [[ $message == "API rate limit exceeded"* ]]; then + echo -e "\nAn attempt to find previous to latest version using GitHub Api Failed... \nReason: ${message}" + echo -e "\nAttempting to find previous to latest version using GitHub tags." 
+ find_prev_version_from_git_tags prev_version "$url" "tags/v" + declare -g ${variable_name}="${prev_version}" + fi + elif echo "$output" | jq -e 'type == "array"' > /dev/null; then + echo -e "\nAttempting to find previous version using GitHub Api." + version=$(echo "$output" | jq -r '.[1].tag_name') + declare -g ${variable_name}="${version#v}" + fi + echo "${variable_name}=${!variable_name}" +} + +get_github_api_repo_url() { + local url=$1 + echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases" +} + +install_compose_switch_fallback() { + compose_switch_url=$1 + repo_url=$(get_github_api_repo_url "${compose_switch_url}") + echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..." + get_previous_version "${compose_switch_url}" "${repo_url}" compose_switch_version + echo -e "\nAttempting to install v${compose_switch_version}" + curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${architecture}" -o /usr/local/bin/compose-switch +} + # Ensure apt is in non-interactive to avoid prompts export DEBIAN_FRONTEND=noninteractive @@ -127,21 +201,25 @@ fi # Fetch host/container arch. architecture="$(dpkg --print-architecture)" +# Prevent attempting to install Moby on Debian trixie (packages removed) +if [ "${USE_MOBY}" = "true" ] && [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; then + err "The 'moby' option is not supported on Debian 'trixie' because 'moby-cli' and related system packages have been removed from that distribution." + err "To continue, either set the feature option '\"moby\": false' or use a different base image (for example: 'debian:bookworm' or 'ubuntu-24.04')." 
+ exit 1 +fi + # Check if distro is supported if [ "${USE_MOBY}" = "true" ]; then - # 'get_common_setting' allows attribute to be updated remotely - get_common_setting DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES if [[ "${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS distribution" - err "Support distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" + err "Supported distributions include: ${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}" exit 1 fi echo "Distro codename '${VERSION_CODENAME}' matched filter '${DOCKER_MOBY_ARCHIVE_VERSION_CODENAMES}'" else - get_common_setting DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES if [[ "${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" != *"${VERSION_CODENAME}"* ]]; then err "Unsupported distribution version '${VERSION_CODENAME}'. To resolve, please choose a compatible OS distribution" - err "Support distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" + err "Supported distributions include: ${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}" exit 1 fi echo "Distro codename '${VERSION_CODENAME}' matched filter '${DOCKER_LICENSED_ARCHIVE_VERSION_CODENAMES}'" @@ -153,8 +231,10 @@ if [ "${USE_MOBY}" = "true" ]; then cli_package_name="moby-cli" # Import key safely and import Microsoft apt repo - get_common_setting MICROSOFT_GPG_KEYS_URI - curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg + { + curl -sSL ${MICROSOFT_GPG_KEYS_URI} + curl -sSL ${MICROSOFT_GPG_KEYS_ROLLING_URI} + } | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list else # Name of 
proprietary engine package @@ -189,6 +269,40 @@ else echo "cli_version_suffix ${cli_version_suffix}" fi +# Version matching for moby-buildx +if [ "${USE_MOBY}" = "true" ]; then + if [ "${MOBY_BUILDX_VERSION}" = "latest" ]; then + # Empty, meaning grab whatever "latest" is in apt repo + buildx_version_suffix="" + else + buildx_version_dot_escaped="${MOBY_BUILDX_VERSION//./\\.}" + buildx_version_dot_plus_escaped="${buildx_version_dot_escaped//+/\\+}" + buildx_version_regex="^(.+:)?${buildx_version_dot_plus_escaped}([\\.\\+ ~:-]|$)" + set +e + buildx_version_suffix="=$(apt-cache madison moby-buildx | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${buildx_version_regex}")" + set -e + if [ -z "${buildx_version_suffix}" ] || [ "${buildx_version_suffix}" = "=" ]; then + err "No full or partial moby-buildx version match found for \"${MOBY_BUILDX_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:" + apt-cache madison moby-buildx | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+' + exit 1 + fi + echo "buildx_version_suffix ${buildx_version_suffix}" + fi +fi + + +docker_home="/usr/libexec/docker" +cli_plugins_dir="${docker_home}/cli-plugins" + +install_compose_fallback(){ + local url=$1 + local repo_url=$(get_github_api_repo_url "$url") + echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..." + get_previous_version "${url}" "${repo_url}" compose_version + echo -e "\nAttempting to install v${compose_version}" + curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} +} + # Install Docker / Moby CLI if not already installed if type docker > /dev/null 2>&1; then echo "Docker / Moby CLI already installed." 
@@ -196,9 +310,9 @@ else if [ "${USE_MOBY}" = "true" ]; then buildx=() if [ "${INSTALL_DOCKER_BUILDX}" = "true" ]; then - buildx=(moby-buildx) + buildx=(moby-buildx${buildx_version_suffix}) fi - apt-get -y install --no-install-recommends ${cli_package_name}${cli_version_suffix} "${buildx[@]}" + apt-get -y install --no-install-recommends ${cli_package_name}${cli_version_suffix} "${buildx[@]}" || { err "It seems packages for moby not available in OS ${ID} ${VERSION_CODENAME} (${architecture}). To resolve, either: (1) set feature option '\"moby\": false' , or (2) choose a compatible OS version (eg: 'ubuntu-24.04')." ; exit 1 ; } apt-get -y install --no-install-recommends moby-compose || echo "(*) Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping." else buildx=() @@ -218,43 +332,78 @@ fi # If 'docker-compose' command is to be included if [ "${DOCKER_DASH_COMPOSE_VERSION}" != "none" ]; then + case "${architecture}" in + amd64) target_compose_arch=x86_64 ;; + arm64) target_compose_arch=aarch64 ;; + *) + echo "(!) Docker outside of docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine." + exit 1 + esac + docker_compose_path="/usr/local/bin/docker-compose" # Install Docker Compose if not already installed and is on a supported architecture if type docker-compose > /dev/null 2>&1; then echo "Docker Compose already installed." elif [ "${DOCKER_DASH_COMPOSE_VERSION}" = "v1" ]; then - TARGET_COMPOSE_ARCH="$(uname -m)" - if [ "${TARGET_COMPOSE_ARCH}" = "amd64" ]; then - TARGET_COMPOSE_ARCH="x86_64" - fi - if [ "${TARGET_COMPOSE_ARCH}" != "x86_64" ]; then + err "The final Compose V1 release, version 1.29.2, was May 10, 2021. These packages haven't received any security updates since then. Use at your own risk." + INSTALL_DOCKER_COMPOSE_SWITCH="false" + + if [ "${target_compose_arch}" = "x86_64" ]; then + echo "(*) Installing docker compose v1..." 
+ curl -fsSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64" -o ${docker_compose_path} + chmod +x ${docker_compose_path} + + # Download the SHA256 checksum + DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-Linux-x86_64.sha256" | awk '{print $1}')" + echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum + sha256sum -c docker-compose.sha256sum --ignore-missing + elif [ "${VERSION_CODENAME}" = "bookworm" ]; then + err "Docker compose v1 is unavailable for 'bookworm' on Arm64. Kindly switch to use v2" + exit 1 + else # Use pip to get a version that runs on this architecture check_packages python3-minimal python3-pip libffi-dev python3-venv - export PIPX_HOME=/usr/local/pipx - mkdir -p ${PIPX_HOME} - export PIPX_BIN_DIR=/usr/local/bin - export PYTHONUSERBASE=/tmp/pip-tmp - export PIP_CACHE_DIR=/tmp/pip-tmp/cache - pipx_bin=pipx - if ! type pipx > /dev/null 2>&1; then - pip3 install --disable-pip-version-check --no-cache-dir --user pipx - pipx_bin=/tmp/pip-tmp/bin/pipx - fi - ${pipx_bin} install --pip-args '--no-cache-dir --force-reinstall' docker-compose - rm -rf /tmp/pip-tmp - else - compose_v1_version="1" - find_version_from_git_tags compose_v1_version "https://github.com/docker/compose" "tags/" - echo "(*) Installing docker-compose ${compose_v1_version}..." - curl -fsSL "https://github.com/docker/compose/releases/download/${compose_v1_version}/docker-compose-Linux-x86_64" -o /usr/local/bin/docker-compose - chmod +x /usr/local/bin/docker-compose + echo "(*) Installing docker compose v1 via pip..." + export PYTHONUSERBASE=/usr/local + pip3 install --disable-pip-version-check --no-cache-dir --user "Cython<3.0" pyyaml wheel docker-compose --no-build-isolation fi else - echo "(*) Installing compose-switch as docker-compose..." 
+ compose_version=${DOCKER_DASH_COMPOSE_VERSION#v} + docker_compose_url="https://github.com/docker/compose" + find_version_from_git_tags compose_version "$docker_compose_url" "tags/v" + echo "(*) Installing docker-compose ${compose_version}..." + curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || { + install_compose_fallback "$docker_compose_url" "$compose_version" "$target_compose_arch" "$docker_compose_path" + } + chmod +x ${docker_compose_path} + + # Download the SHA256 checksum + DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')" + echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum + sha256sum -c docker-compose.sha256sum --ignore-missing + + mkdir -p ${cli_plugins_dir} + cp ${docker_compose_path} ${cli_plugins_dir} + fi +fi + +# Install docker-compose switch if not already installed - https://github.com/docker/compose-switch#manual-installation +if [ "${INSTALL_DOCKER_COMPOSE_SWITCH}" = "true" ] && ! type compose-switch > /dev/null 2>&1; then + if type docker-compose > /dev/null 2>&1; then + echo "(*) Installing compose-switch..." 
+ current_compose_path="$(which docker-compose)" + target_compose_path="$(dirname "${current_compose_path}")/docker-compose-v1" compose_switch_version="latest" - find_version_from_git_tags compose_switch_version "https://github.com/docker/compose-switch" - curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${architecture}" -o /usr/local/bin/docker-compose - chmod +x /usr/local/bin/docker-compose + compose_switch_url="https://github.com/docker/compose-switch" + find_version_from_git_tags compose_switch_version "${compose_switch_url}" + curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${architecture}" -o /usr/local/bin/compose-switch || install_compose_switch_fallback "${compose_switch_url}" + chmod +x /usr/local/bin/compose-switch # TODO: Verify checksum once available: https://github.com/docker/compose-switch/issues/11 + # Setup v1 CLI as alternative in addition to compose-switch (which maps to v2) + mv "${current_compose_path}" "${target_compose_path}" + update-alternatives --install ${docker_compose_path} docker-compose /usr/local/bin/compose-switch 99 + update-alternatives --install ${docker_compose_path} docker-compose "${target_compose_path}" 1 + else + err "Skipping installation of compose-switch as docker compose is unavailable..." fi fi @@ -348,7 +497,7 @@ if [ "${ENABLE_NONROOT_DOCKER}" = "true" ] && [ "${SOURCE_SOCKET}" != "${TARGET_ log "Enabling socket proxy." log "Proxying ${SOURCE_SOCKET} to ${TARGET_SOCKET} for vscode" sudoIf rm -rf ${TARGET_SOCKET} - (sudoIf socat UNIX-LISTEN:${TARGET_SOCKET},fork,mode=660,user=${USERNAME} UNIX-CONNECT:${SOURCE_SOCKET} 2>&1 | sudoIf tee -a \${SOCAT_LOG} > /dev/null & echo "\$!" 
| sudoIf tee \${SOCAT_PID} > /dev/null) + (sudoIf socat UNIX-LISTEN:${TARGET_SOCKET},fork,mode=660,user=${USERNAME},backlog=128 UNIX-CONNECT:${SOURCE_SOCKET} 2>&1 | sudoIf tee -a \${SOCAT_LOG} > /dev/null & echo "\$!" | sudoIf tee \${SOCAT_PID} > /dev/null) else log "Socket proxy already running." fi diff --git a/src/dotnet/NOTES.md b/src/dotnet/NOTES.md index 578aceaf3..ff52835b0 100644 --- a/src/dotnet/NOTES.md +++ b/src/dotnet/NOTES.md @@ -2,8 +2,7 @@ Installing only the latest .NET SDK version (the default). -``` json -{ +``` jsonc "features": { "ghcr.io/devcontainers/features/dotnet:2": "latest" // or "" or {} } @@ -11,10 +10,10 @@ Installing only the latest .NET SDK version (the default). Installing an additional SDK version. Multiple versions can be specified as comma-separated values. -``` json -{ +``` jsonc "features": { "ghcr.io/devcontainers/features/dotnet:2": { + "version": "latest", // (this can be omitted) "additionalVersions": "lts" } } @@ -23,7 +22,6 @@ Installing an additional SDK version. Multiple versions can be specified as comm Installing specific SDK versions. ``` json -{ "features": { "ghcr.io/devcontainers/features/dotnet:2": { "version": "6.0", @@ -35,7 +33,6 @@ Installing specific SDK versions. Installing a specific SDK feature band. ``` json -{ "features": { "ghcr.io/devcontainers/features/dotnet:2": { "version": "6.0.4xx", @@ -46,7 +43,6 @@ Installing a specific SDK feature band. Installing a specific SDK patch version. ``` json -{ "features": { "ghcr.io/devcontainers/features/dotnet:2": { "version": "6.0.412", @@ -57,7 +53,6 @@ Installing a specific SDK patch version. Installing only the .NET Runtime or the ASP.NET Core Runtime. (The SDK includes all runtimes so this configuration is only useful if you need to run .NET apps without building them from source.) ``` json -{ "features": { "ghcr.io/devcontainers/features/dotnet:2": { "version": "none", @@ -67,6 +62,29 @@ Installing only the .NET Runtime or the ASP.NET Core Runtime. 
(The SDK includes } ``` +Installing .NET workloads. Multiple workloads can be specified as comma-separated values. + +``` json +"features": { + "ghcr.io/devcontainers/features/dotnet:2": { + "workloads": "aspire, wasm-tools" + } +} +``` + +Installing prerelease builds. Supports `preview` and `daily` suffixes. + +``` json +"features": { + "ghcr.io/devcontainers/features/dotnet:2": { + "version": "10.0-preview", + "additionalVersions": "10.0.1xx-daily", + "dotnetRuntimeVersions": "10.0-daily", + "aspnetCoreRuntimeVersions": "10.0-daily" + } +} +``` + ## OS Support This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. diff --git a/src/dotnet/README.md b/src/dotnet/README.md index 972e5d426..fecaeb3f3 100644 --- a/src/dotnet/README.md +++ b/src/dotnet/README.md @@ -19,6 +19,7 @@ This Feature installs the latest .NET SDK, which includes the .NET CLI and the s | additionalVersions | Enter additional .NET SDK versions, separated by commas. Use 'latest' for the latest version, 'lts' for the latest LTS version, 'X.Y' or 'X.Y.Z' for a specific version. | string | - | | dotnetRuntimeVersions | Enter additional .NET runtime versions, separated by commas. Use 'latest' for the latest version, 'lts' for the latest LTS version, 'X.Y' or 'X.Y.Z' for a specific version. | string | - | | aspNetCoreRuntimeVersions | Enter additional ASP.NET Core runtime versions, separated by commas. Use 'latest' for the latest version, 'lts' for the latest LTS version, 'X.Y' or 'X.Y.Z' for a specific version. | string | - | +| workloads | Enter additional .NET SDK workloads, separated by commas. Use 'dotnet workload search' to learn what workloads are available to install. | string | - | ## Customizations @@ -30,8 +31,7 @@ This Feature installs the latest .NET SDK, which includes the .NET CLI and the s Installing only the latest .NET SDK version (the default). 
-``` json -{ +``` jsonc "features": { "ghcr.io/devcontainers/features/dotnet:2": "latest" // or "" or {} } @@ -40,7 +40,6 @@ Installing only the latest .NET SDK version (the default). Installing an additional SDK version. Multiple versions can be specified as comma-separated values. ``` json -{ "features": { "ghcr.io/devcontainers/features/dotnet:2": { "additionalVersions": "lts" @@ -51,7 +50,6 @@ Installing an additional SDK version. Multiple versions can be specified as comm Installing specific SDK versions. ``` json -{ "features": { "ghcr.io/devcontainers/features/dotnet:2": { "version": "6.0", @@ -63,7 +61,6 @@ Installing specific SDK versions. Installing a specific SDK feature band. ``` json -{ "features": { "ghcr.io/devcontainers/features/dotnet:2": { "version": "6.0.4xx", @@ -74,7 +71,6 @@ Installing a specific SDK feature band. Installing a specific SDK patch version. ``` json -{ "features": { "ghcr.io/devcontainers/features/dotnet:2": { "version": "6.0.412", @@ -85,7 +81,6 @@ Installing a specific SDK patch version. Installing only the .NET Runtime or the ASP.NET Core Runtime. (The SDK includes all runtimes so this configuration is only useful if you need to run .NET apps without building them from source.) ``` json -{ "features": { "ghcr.io/devcontainers/features/dotnet:2": { "version": "none", @@ -95,6 +90,16 @@ Installing only the .NET Runtime or the ASP.NET Core Runtime. (The SDK includes } ``` +Installing .NET workloads. Multiple workloads can be specified as comma-separated values. + +``` json +"features": { + "ghcr.io/devcontainers/features/dotnet:2": { + "workloads": "aspire, wasm-tools" + } +} +``` + ## OS Support This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. 
diff --git a/src/dotnet/devcontainer-feature.json b/src/dotnet/devcontainer-feature.json index f00e0d3ea..9389addaa 100644 --- a/src/dotnet/devcontainer-feature.json +++ b/src/dotnet/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "dotnet", - "version": "2.0.0", + "version": "2.4.0", "name": "Dotnet CLI", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/dotnet", "description": "This Feature installs the latest .NET SDK, which includes the .NET CLI and the shared runtime. Options are provided to choose a different version or additional versions.", @@ -11,27 +11,36 @@ "latest", "lts", "none", + "10.0", + "10.0-preview", + "10.0-daily", + "9.0", "8.0", "7.0", "6.0" ], "default": "latest", - "description": "Select or enter a .NET SDK version. Use 'latest' for the latest version, 'lts' for the latest LTS version, 'X.Y' or 'X.Y.Z' for a specific version." + "description": "Select or enter a .NET SDK version. Use 'latest' for the latest version, 'lts' for the latest LTS version, 'X.Y' or 'X.Y.Z' for a specific version, 'X.Y-preview' or 'X.Y-daily' for prereleases." }, "additionalVersions": { "type": "string", "default": "", - "description": "Enter additional .NET SDK versions, separated by commas. Use 'latest' for the latest version, 'lts' for the latest LTS version, 'X.Y' or 'X.Y.Z' for a specific version." + "description": "Enter additional .NET SDK versions, separated by commas. Use 'latest' for the latest version, 'lts' for the latest LTS version, 'X.Y' or 'X.Y.Z' for a specific version, 'X.Y-preview' or 'X.Y-daily' for prereleases." }, "dotnetRuntimeVersions": { "type": "string", "default": "", - "description": "Enter additional .NET runtime versions, separated by commas. Use 'latest' for the latest version, 'lts' for the latest LTS version, 'X.Y' or 'X.Y.Z' for a specific version." + "description": "Enter additional .NET runtime versions, separated by commas. 
Use 'latest' for the latest version, 'lts' for the latest LTS version, 'X.Y' or 'X.Y.Z' for a specific version, 'X.Y-preview' or 'X.Y-daily' for prereleases." }, "aspNetCoreRuntimeVersions": { "type": "string", "default": "", - "description": "Enter additional ASP.NET Core runtime versions, separated by commas. Use 'latest' for the latest version, 'lts' for the latest LTS version, 'X.Y' or 'X.Y.Z' for a specific version." + "description": "Enter additional ASP.NET Core runtime versions, separated by commas. Use 'latest' for the latest version, 'lts' for the latest LTS version, 'X.Y' or 'X.Y.Z' for a specific version, 'X.Y-preview' or 'X.Y-daily' for prereleases." + }, + "workloads": { + "type": "string", + "default": "", + "description": "Enter additional .NET SDK workloads, separated by commas. Use 'dotnet workload search' to learn what workloads are available to install." } }, "containerEnv": { @@ -44,7 +53,14 @@ "vscode": { "extensions": [ "ms-dotnettools.csharp" - ] + ], + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes the .NET SDK, which includes the .NET CLI and the shared runtime, pre-installed and available on the `PATH`, along with the C# language extension for .NET development." 
+ } + ] + } } }, "installsAfter": [ diff --git a/src/dotnet/install.sh b/src/dotnet/install.sh index ff9244d3a..bd85fb91a 100644 --- a/src/dotnet/install.sh +++ b/src/dotnet/install.sh @@ -7,9 +7,17 @@ # Docs: https://github.com/devcontainers/features/tree/main/src/dotnet # Maintainer: The Dev Container spec maintainers DOTNET_VERSION="${VERSION:-"latest"}" -ADDITIONAL_VERSIONS="${ADDITIONALVERSIONS}" -DOTNET_RUNTIME_VERSIONS="${DOTNETRUNTIMEVERSIONS}" -ASPNETCORE_RUNTIME_VERSIONS="${ASPNETCORERUNTIMEVERSIONS}" +ADDITIONAL_VERSIONS="${ADDITIONALVERSIONS:-""}" +DOTNET_RUNTIME_VERSIONS="${DOTNETRUNTIMEVERSIONS:-""}" +ASPNETCORE_RUNTIME_VERSIONS="${ASPNETCORERUNTIMEVERSIONS:-""}" +WORKLOADS="${WORKLOADS:-""}" + +# Prevent "Welcome to .NET" message from dotnet +export DOTNET_NOLOGO=true + +# Prevent generating a development certificate while running this script +# Otherwise it would be stored in the image, which is undesirable +export DOTNET_GENERATE_ASPNET_CERTIFICATE=false set -e @@ -100,17 +108,44 @@ done check_packages wget ca-certificates icu-devtools for version in "${versions[@]}"; do - install_sdk "$version" + read -r clean_version quality < <(parse_version_and_quality "$version") + if [ -n "$quality" ]; then + echo "Interpreting requested version '$version' as version '$clean_version' with quality '$quality'" + fi + install_sdk "$clean_version" "$quality" done for version in "${dotnetRuntimeVersions[@]}"; do - install_runtime "dotnet" "$version" + read -r clean_version quality < <(parse_version_and_quality "$version") + if [ -n "$quality" ]; then + echo "Interpreting requested runtime version '$version' as version '$clean_version' with quality '$quality'" + fi + install_runtime "dotnet" "$clean_version" "$quality" done for version in "${aspNetCoreRuntimeVersions[@]}"; do - install_runtime "aspnetcore" "$version" + read -r clean_version quality < <(parse_version_and_quality "$version") + if [ -n "$quality" ]; then + echo "Interpreting requested ASP.NET Core 
runtime version '$version' as version '$clean_version' with quality '$quality'" + fi + install_runtime "aspnetcore" "$clean_version" "$quality" done +workloads=() +for workload in $(split_csv "$WORKLOADS"); do + workloads+=("$workload") +done + +if [ ${#workloads[@]} -ne 0 ]; then + install_workloads "${workloads[@]}" +fi + +# Create a symbolic link '/usr/bin/dotnet', to make dotnet available to 'sudo' +# This is necessary because 'sudo' resets the PATH variable, so it won't search the DOTNET_ROOT directory +if [ ! -e /usr/bin/dotnet ]; then + ln --symbolic "$DOTNET_ROOT/dotnet" /usr/bin/dotnet +fi + # Clean up rm -rf /var/lib/apt/lists/* rm -rf scripts diff --git a/src/dotnet/scripts/dotnet-helpers.sh b/src/dotnet/scripts/dotnet-helpers.sh index bda0c9c3a..2ef8796eb 100644 --- a/src/dotnet/scripts/dotnet-helpers.sh +++ b/src/dotnet/scripts/dotnet-helpers.sh @@ -8,7 +8,6 @@ # Maintainer: The Dev Container spec maintainers DOTNET_SCRIPTS=$(dirname "${BASH_SOURCE[0]}") DOTNET_INSTALL_SCRIPT="$DOTNET_SCRIPTS/vendor/dotnet-install.sh" -DOTNET_INSTALL_DIR='/usr/share/dotnet' # Prints the latest dotnet version in the specified channel # Usage: fetch_latest_version_in_channel [] @@ -19,13 +18,12 @@ fetch_latest_version_in_channel() { local channel="$1" local runtime="$2" if [ "$runtime" = "dotnet" ]; then - wget -qO- "https://dotnetcli.azureedge.net/dotnet/Runtime/$channel/latest.version" + wget -qO- "https://builds.dotnet.microsoft.com/dotnet/Runtime/$channel/latest.version" elif [ "$runtime" = "aspnetcore" ]; then - wget -qO- "https://dotnetcli.azureedge.net/dotnet/aspnetcore/Runtime/$channel/latest.version" + wget -qO- "https://builds.dotnet.microsoft.com/dotnet/aspnetcore/Runtime/$channel/latest.version" else - wget -qO- "https://dotnetcli.azureedge.net/dotnet/Sdk/$channel/latest.version" + wget -qO- "https://builds.dotnet.microsoft.com/dotnet/Sdk/$channel/latest.version" fi - } # Prints the latest dotnet version @@ -47,9 +45,12 @@ fetch_latest_version() { } # 
Installs a version of the .NET SDK -# Usage: install_sdk +# Usage: install_sdk [] +# Example: install_sdk "9.0" +# Example: install_sdk "10.0" "preview" install_sdk() { - local inputVersion="$1" + local inputVersion="$1" # Could be 'latest', 'lts', 'X.Y', 'X.Y.Z', 'X.Y.4xx', or base channel when paired with quality + local quality="$2" # Optional quality: GA, preview, daily (empty implies GA) local version="" local channel="" if [[ "$inputVersion" == "latest" ]]; then @@ -75,20 +76,25 @@ install_sdk() { version="$inputVersion" fi - # Currently this script does not make it possible to qualify the version, 'GA' is always implied - echo "Executing $DOTNET_INSTALL_SCRIPT --version $version --channel $channel --install-dir $DOTNET_INSTALL_DIR --no-path" - "$DOTNET_INSTALL_SCRIPT" \ - --version "$version" \ - --channel "$channel" \ - --install-dir "$DOTNET_INSTALL_DIR" \ - --no-path + local cmd=("$DOTNET_INSTALL_SCRIPT" "--version" "$version" "--install-dir" "$DOTNET_ROOT") + if [ -n "$channel" ]; then + cmd+=("--channel" "$channel") + fi + if [ -n "$quality" ]; then + cmd+=("--quality" "$quality") + fi + echo "Executing ${cmd[*]}" + "${cmd[@]}" } # Installs a version of the .NET Runtime -# Usage: install_runtime +# Usage: install_runtime [] +# Example: install_runtime "dotnet" "9.0" +# Example: install_runtime "aspnetcore" "10.0" "preview" install_runtime() { local runtime="$1" - local inputVersion="$2" + local inputVersion="$2" # Could be 'latest', 'lts', 'X.Y', 'X.Y.Z' + local quality="$3" # Optional quality: GA, preview, daily (empty implies GA) local version="" local channel="" if [[ "$inputVersion" == "latest" ]]; then @@ -108,12 +114,74 @@ install_runtime() { # Assume version is an exact version string like '6.0.21' or '8.0.0-preview.7.23375.6' version="$inputVersion" fi - - echo "Executing $DOTNET_INSTALL_SCRIPT --runtime $runtime --version $version --channel $channel --install-dir $DOTNET_INSTALL_DIR --no-path" - "$DOTNET_INSTALL_SCRIPT" \ - --runtime "$runtime" 
\ - --version "$version" \ - --channel "$channel" \ - --install-dir "$DOTNET_INSTALL_DIR" \ - --no-path + + local cmd=("$DOTNET_INSTALL_SCRIPT" "--runtime" "$runtime" "--version" "$version" "--install-dir" "$DOTNET_ROOT" "--no-path") + if [ -n "$channel" ]; then + cmd+=("--channel" "$channel") + fi + if [ -n "$quality" ]; then + cmd+=("--quality" "$quality") + fi + echo "Executing ${cmd[*]}" + "${cmd[@]}" } + +# Installs one or more .NET workloads +# Usage: install_workload [ ...] +# Reference: https://docs.microsoft.com/en-us/dotnet/core/tools/dotnet-workload-install +install_workloads() { + local workloads="$@" + + echo "Installing .NET workload(s) $workloads" + dotnet workload install $workloads --temp-dir /tmp/dotnet-workload-temp-dir + + # Clean up + rm -r /tmp/dotnet-workload-temp-dir +} + +# Input: version spec possibly containing -preview or -daily +# Supports channels in the forms: +# A.B (e.g. 10.0) +# A.B.Cxx (feature band e.g. 6.0.4xx) +# A.B-preview (adds quality) +# A.B-daily +# A.B.Cxx-preview +# A.B.Cxx-daily +# Output (stdout): " " +# - For channel specs (A.B or A.B.Cxx) without suffix -> quality is GA +# - For channel specs with -preview/-daily suffix -> quality is preview/daily +# - For exact version specs (contain a third numeric segment or prerelease labels beyond channel patterns, e.g. 
8.0.100-rc.2.23502.2) -> quality is empty +# Examples: +# parse_version_and_quality "10.0-preview" => "10.0 preview" +# parse_version_and_quality "10.0-daily" => "10.0 daily" +# parse_version_and_quality "10.0" => "10.0 GA" +# parse_version_and_quality "6.0.4xx" => "6.0.4xx GA" +# parse_version_and_quality "6.0.4xx-preview" => "6.0.4xx preview" +# parse_version_and_quality "6.0.4xx-daily" => "6.0.4xx daily" +parse_version_and_quality() { + local input="$1" + local quality="" + local clean_version="$input" + # Match feature band with quality + if [[ "$input" =~ ^([0-9]+\.[0-9]+\.[0-9]xx)-(preview|daily)$ ]]; then + clean_version="${BASH_REMATCH[1]}" + quality="${BASH_REMATCH[2]}" + # Match simple channel with quality + elif [[ "$input" =~ ^([0-9]+\.[0-9]+)-(preview|daily)$ ]]; then + clean_version="${BASH_REMATCH[1]}" + quality="${BASH_REMATCH[2]}" + # Match plain feature band channel (defaults to GA) + elif [[ "$input" =~ ^[0-9]+\.[0-9]+\.[0-9]xx$ ]]; then + clean_version="$input" + quality="GA" + # Match simple channel (defaults to GA) + elif [[ "$input" =~ ^[0-9]+\.[0-9]+$ ]]; then + clean_version="$input" + quality="GA" + else + # Exact version (leave quality empty) + clean_version="$input" + quality="" + fi + echo "$clean_version" "$quality" +} \ No newline at end of file diff --git a/src/dotnet/scripts/vendor/README.md b/src/dotnet/scripts/vendor/README.md index 181b53781..9e330e524 100644 --- a/src/dotnet/scripts/vendor/README.md +++ b/src/dotnet/scripts/vendor/README.md @@ -23,5 +23,6 @@ dotnet-install.sh [--version latest] --channel 6.0 [--quality GA] dotnet-install.sh [--version latest] --channel 6.0.4xx [--quality GA] dotnet-install.sh [--version latest] --channel 8.0 --quality preview dotnet-install.sh [--version latest] --channel 8.0 --quality daily +dotnet-install.sh [--version latest] --channel 10.0 --quality preview dotnet-install.sh --version 6.0.413 ``` \ No newline at end of file diff --git a/src/dotnet/scripts/vendor/dotnet-install.sh 
b/src/dotnet/scripts/vendor/dotnet-install.sh index a830583cd..034d2dfb1 100755 --- a/src/dotnet/scripts/vendor/dotnet-install.sh +++ b/src/dotnet/scripts/vendor/dotnet-install.sh @@ -298,11 +298,20 @@ get_machine_architecture() { if command -v uname > /dev/null; then CPUName=$(uname -m) case $CPUName in + armv1*|armv2*|armv3*|armv4*|armv5*|armv6*) + echo "armv6-or-below" + return 0 + ;; armv*l) echo "arm" return 0 ;; aarch64|arm64) + if [ "$(getconf LONG_BIT)" -lt 64 ]; then + # This is 32-bit OS running on 64-bit CPU (for example Raspberry Pi OS) + echo "arm" + return 0 + fi echo "arm64" return 0 ;; @@ -314,6 +323,18 @@ get_machine_architecture() { echo "ppc64le" return 0 ;; + loongarch64) + echo "loongarch64" + return 0 + ;; + riscv64) + echo "riscv64" + return 0 + ;; + powerpc|ppc) + echo "ppc" + return 0 + ;; esac fi @@ -330,7 +351,13 @@ get_normalized_architecture_from_architecture() { local architecture="$(to_lowercase "$1")" if [[ $architecture == \ ]]; then - echo "$(get_machine_architecture)" + machine_architecture="$(get_machine_architecture)" + if [[ "$machine_architecture" == "armv6-or-below" ]]; then + say_err "Architecture \`$machine_architecture\` not supported. If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues" + return 1 + fi + + echo $machine_architecture return 0 fi @@ -355,6 +382,10 @@ get_normalized_architecture_from_architecture() { echo "ppc64le" return 0 ;; + loongarch64) + echo "loongarch64" + return 0 + ;; esac say_err "Architecture \`$architecture\` not supported. 
If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues" @@ -392,11 +423,17 @@ get_normalized_architecture_for_specific_sdk_version() { # args: # version or channel - $1 is_arm64_supported() { - #any channel or version that starts with the specified versions - case "$1" in - ( "1"* | "2"* | "3"* | "4"* | "5"*) - echo false - return 0 + # Extract the major version by splitting on the dot + major_version="${1%%.*}" + + # Check if the major version is a valid number and less than 6 + case "$major_version" in + [0-9]*) + if [ "$major_version" -lt 6 ]; then + echo false + return 0 + fi + ;; esac echo true @@ -415,8 +452,13 @@ get_normalized_os() { echo "$osname" return 0 ;; + macos) + osname='osx' + echo "$osname" + return 0 + ;; *) - say_err "'$user_defined_os' is not a supported value for --os option, supported values are: osx, linux, linux-musl, freebsd, rhel.6. If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues." + say_err "'$user_defined_os' is not a supported value for --os option, supported values are: osx, macos, linux, linux-musl, freebsd, rhel.6. If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues." return 1 ;; esac @@ -435,7 +477,7 @@ get_normalized_quality() { local quality="$(to_lowercase "$1")" if [ ! -z "$quality" ]; then case "$quality" in - daily | signed | validated | preview) + daily | preview) echo "$quality" return 0 ;; @@ -444,7 +486,7 @@ get_normalized_quality() { return 0 ;; *) - say_err "'$quality' is not a supported value for --quality option. Supported values are: daily, signed, validated, preview, ga. If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues." + say_err "'$quality' is not a supported value for --quality option. Supported values are: daily, preview, ga. If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues." 
return 1 ;; esac @@ -546,6 +588,40 @@ is_dotnet_package_installed() { fi } +# args: +# downloaded file - $1 +# remote_file_size - $2 +validate_remote_local_file_sizes() +{ + eval $invocation + + local downloaded_file="$1" + local remote_file_size="$2" + local file_size='' + + if [[ "$OSTYPE" == "linux-gnu"* ]]; then + file_size="$(stat -c '%s' "$downloaded_file")" + elif [[ "$OSTYPE" == "darwin"* ]]; then + # hardcode in order to avoid conflicts with GNU stat + file_size="$(/usr/bin/stat -f '%z' "$downloaded_file")" + fi + + if [ -n "$file_size" ]; then + say "Downloaded file size is $file_size bytes." + + if [ -n "$remote_file_size" ] && [ -n "$file_size" ]; then + if [ "$remote_file_size" -ne "$file_size" ]; then + say "The remote and local file sizes are not equal. The remote file size is $remote_file_size bytes and the local size is $file_size bytes. The local package may be corrupted." + else + say "The remote and local file sizes are equal." + fi + fi + + else + say "Either downloaded or local package size can not be measured. One of them may be corrupted." 
+ fi +} + # args: # azure_feed - $1 # channel - $2 @@ -880,6 +956,37 @@ get_absolute_path() { return 0 } +# args: +# override - $1 (boolean, true or false) +get_cp_options() { + eval $invocation + + local override="$1" + local override_switch="" + + if [ "$override" = false ]; then + override_switch="-n" + + # create temporary files to check if 'cp -u' is supported + tmp_dir="$(mktemp -d)" + tmp_file="$tmp_dir/testfile" + tmp_file2="$tmp_dir/testfile2" + + touch "$tmp_file" + + # use -u instead of -n if it's available + if cp -u "$tmp_file" "$tmp_file2" 2>/dev/null; then + override_switch="-u" + fi + + # clean up + rm -f "$tmp_file" "$tmp_file2" + rm -rf "$tmp_dir" + fi + + echo "$override_switch" +} + # args: # input_files - stdin # root_path - $1 @@ -891,15 +998,7 @@ copy_files_or_dirs_from_list() { local root_path="$(remove_trailing_slash "$1")" local out_path="$(remove_trailing_slash "$2")" local override="$3" - local osname="$(get_current_os_name)" - local override_switch=$( - if [ "$override" = false ]; then - if [ "$osname" = "linux-musl" ]; then - printf -- "-u"; - else - printf -- "-n"; - fi - fi) + local override_switch="$(get_cp_options "$override")" cat | uniq | while read -r file_path; do local path="$(remove_beginning_slash "${file_path#$root_path}")" @@ -914,14 +1013,39 @@ copy_files_or_dirs_from_list() { done } +# args: +# zip_uri - $1 +get_remote_file_size() { + local zip_uri="$1" + + if machine_has "curl"; then + file_size=$(curl -sI "$zip_uri" | grep -i content-length | awk '{ num = $2 + 0; print num }') + elif machine_has "wget"; then + file_size=$(wget --spider --server-response -O /dev/null "$zip_uri" 2>&1 | grep -i 'Content-Length:' | awk '{ num = $2 + 0; print num }') + else + say "Neither curl nor wget is available on this system." + return + fi + + if [ -n "$file_size" ]; then + say "Remote file $zip_uri size is $file_size bytes." + echo "$file_size" + else + say_verbose "Content-Length header was not extracted for $zip_uri." 
+ echo "" + fi +} + # args: # zip_path - $1 # out_path - $2 +# remote_file_size - $3 extract_dotnet_package() { eval $invocation local zip_path="$1" local out_path="$2" + local remote_file_size="$3" local temp_out_path="$(mktemp -d "$temporary_file_template")" @@ -931,9 +1055,13 @@ extract_dotnet_package() { local folders_with_version_regex='^.*/[0-9]+\.[0-9]+[^/]+/' find "$temp_out_path" -type f | grep -Eo "$folders_with_version_regex" | sort | copy_files_or_dirs_from_list "$temp_out_path" "$out_path" false find "$temp_out_path" -type f | grep -Ev "$folders_with_version_regex" | copy_files_or_dirs_from_list "$temp_out_path" "$out_path" "$override_non_versioned_files" - + + validate_remote_local_file_sizes "$zip_path" "$remote_file_size" + rm -rf "$temp_out_path" - rm -f "$zip_path" && say_verbose "Temporary zip file $zip_path was removed" + if [ -z ${keep_zip+x} ]; then + rm -f "$zip_path" && say_verbose "Temporary archive file $zip_path was removed" + fi if [ "$failed" = true ]; then say_err "Extraction failed" @@ -1070,13 +1198,19 @@ downloadcurl() { local curl_options="--retry 20 --retry-delay 2 --connect-timeout 15 -sSL -f --create-dirs " local curl_exit_code=0; if [ -z "$out_path" ]; then - curl $curl_options "$remote_path_with_credential" 2>&1 + curl_output=$(curl $curl_options "$remote_path_with_credential" 2>&1) curl_exit_code=$? + echo "$curl_output" else - curl $curl_options -o "$out_path" "$remote_path_with_credential" 2>&1 + curl_output=$(curl $curl_options -o "$out_path" "$remote_path_with_credential" 2>&1) curl_exit_code=$? fi - + + # Regression in curl causes curl with --retry to return a 0 exit code even when it fails to download a file - https://github.com/curl/curl/issues/17554 + if [ $curl_exit_code -eq 0 ] && echo "$curl_output" | grep -q "^curl: ([0-9]*) "; then + curl_exit_code=$(echo "$curl_output" | sed 's/curl: (\([0-9]*\)).*/\1/') + fi + if [ $curl_exit_code -gt 0 ]; then download_error_msg="Unable to download $remote_path." 
# Check for curl timeout codes @@ -1180,6 +1314,12 @@ get_download_link_from_aka_ms() { http_codes=$( echo "$response" | awk '$1 ~ /^HTTP/ {print $2}' ) # They all need to be 301, otherwise some links are broken (except for the last, which is not a redirect but 200 or 404). broken_redirects=$( echo "$http_codes" | sed '$d' | grep -v '301' ) + # The response may end without final code 2xx/4xx/5xx somehow, e.g. network restrictions on www.bing.com causes redirecting to bing.com fails with connection refused. + # In this case it should not exclude the last. + last_http_code=$( echo "$http_codes" | tail -n 1 ) + if ! [[ $last_http_code =~ ^(2|4|5)[0-9][0-9]$ ]]; then + broken_redirects=$( echo "$http_codes" | grep -v '301' ) + fi # All HTTP codes are 301 (Moved Permanently), the redirect link exists. if [[ -z "$broken_redirects" ]]; then @@ -1201,23 +1341,16 @@ get_download_link_from_aka_ms() { get_feeds_to_use() { feeds=( - "https://dotnetcli.azureedge.net/dotnet" - "https://dotnetbuilds.azureedge.net/public" + "https://builds.dotnet.microsoft.com/dotnet" + "https://ci.dot.net/public" ) if [[ -n "$azure_feed" ]]; then feeds=("$azure_feed") fi - if [[ "$no_cdn" == "true" ]]; then - feeds=( - "https://dotnetcli.blob.core.windows.net/dotnet" - "https://dotnetbuilds.blob.core.windows.net/public" - ) - - if [[ -n "$uncached_feed" ]]; then - feeds=("$uncached_feed") - fi + if [[ -n "$uncached_feed" ]]; then + feeds=("$uncached_feed") fi } @@ -1349,7 +1482,7 @@ generate_regular_links() { link_types+=("legacy") else legacy_download_link="" - say_verbose "Cound not construct a legacy_download_link; omitting..." + say_verbose "Could not construct a legacy_download_link; omitting..." fi # Check if the SDK version is already installed. 
@@ -1427,10 +1560,11 @@ install_dotnet() { eval $invocation local download_failed=false local download_completed=false + local remote_file_size=0 mkdir -p "$install_root" - zip_path="$(mktemp "$temporary_file_template")" - say_verbose "Zip path: $zip_path" + zip_path="${zip_path:-$(mktemp "$temporary_file_template")}" + say_verbose "Archive path: $zip_path" for link_index in "${!download_links[@]}" do @@ -1451,10 +1585,10 @@ install_dotnet() { say "The resource at $link_type link '$download_link' is not available." ;; *) - say "Failed to download $link_type link '$download_link': $download_error_msg" + say "Failed to download $link_type link '$download_link': $http_code $download_error_msg" ;; esac - rm -f "$zip_path" 2>&1 && say_verbose "Temporary zip file $zip_path was removed" + rm -f "$zip_path" 2>&1 && say_verbose "Temporary archive file $zip_path was removed" else download_completed=true break @@ -1467,8 +1601,10 @@ install_dotnet() { return 1 fi - say "Extracting zip from $download_link" - extract_dotnet_package "$zip_path" "$install_root" || return 1 + remote_file_size="$(get_remote_file_size "$download_link")" + + say "Extracting archive from $download_link" + extract_dotnet_package "$zip_path" "$install_root" "$remote_file_size" || return 1 # Check if the SDK version is installed; if not, fail the installation. # if the version contains "RTM" or "servicing"; check if a 'release-type' SDK version is installed. 
@@ -1510,7 +1646,6 @@ install_dir="" architecture="" dry_run=false no_path=false -no_cdn=false azure_feed="" uncached_feed="" feed_credential="" @@ -1583,10 +1718,6 @@ do verbose=true non_dynamic_parameters+=" $name" ;; - --no-cdn|-[Nn]o[Cc]dn) - no_cdn=true - non_dynamic_parameters+=" $name" - ;; --azure-feed|-[Aa]zure[Ff]eed) shift azure_feed="$1" @@ -1618,10 +1749,22 @@ do override_non_versioned_files=false non_dynamic_parameters+=" $name" ;; + --keep-zip|-[Kk]eep[Zz]ip) + keep_zip=true + non_dynamic_parameters+=" $name" + ;; + --zip-path|-[Zz]ip[Pp]ath) + shift + zip_path="$1" + ;; -?|--?|-h|--help|-[Hh]elp) - script_name="$(basename "$0")" + script_name="dotnet-install.sh" echo ".NET Tools Installer" - echo "Usage: $script_name [-c|--channel ] [-v|--version ] [-p|--prefix ]" + echo "Usage:" + echo " # Install a .NET SDK of a given Quality from a given Channel" + echo " $script_name [-c|--channel ] [-q|--quality ]" + echo " # Install a .NET SDK of a specific public version" + echo " $script_name [-v|--version ]" echo " $script_name -h|-?|--help" echo "" echo "$script_name is a simple command line interface for obtaining dotnet cli." @@ -1651,7 +1794,7 @@ do echo " examples: 2.0.0-preview2-006120; 1.1.0" echo " -q,--quality Download the latest build of specified quality in the channel." echo " -Quality" - echo " The possible values are: daily, signed, validated, preview, GA." + echo " The possible values are: daily, preview, GA." echo " Works only in combination with channel. Not applicable for STS and LTS channels and will be ignored if those channels are used." echo " For SDK use channel in A.B.Cxx format. Using quality for SDK together with channel in A.B format is not supported." echo " Supported since 5.0 release." @@ -1663,7 +1806,7 @@ do echo " -InstallDir" echo " --architecture Architecture of dotnet binaries to be installed, Defaults to \`$architecture\`." 
echo " --arch,-Architecture,-Arch" - echo " Possible values: x64, arm, arm64, s390x and ppc64le" + echo " Possible values: x64, arm, arm64, s390x, ppc64le and loongarch64" echo " --os Specifies operating system to be used when selecting the installer." echo " Overrides the OS determination approach used by the script. Supported values: osx, linux, linux-musl, freebsd, rhel.6." echo " In case any other value is provided, the platform will be determined by the script based on machine configuration." @@ -1679,15 +1822,14 @@ do echo " --verbose,-Verbose Display diagnostics information." echo " --azure-feed,-AzureFeed For internal use only." echo " Allows using a different storage to download SDK archives from." - echo " This parameter is only used if --no-cdn is false." echo " --uncached-feed,-UncachedFeed For internal use only." echo " Allows using a different storage to download SDK archives from." - echo " This parameter is only used if --no-cdn is true." echo " --skip-non-versioned-files Skips non-versioned files if they already exist, such as the dotnet executable." echo " -SkipNonVersionedFiles" - echo " --no-cdn,-NoCdn Disable downloading from the Azure CDN, and use the uncached feed directly." echo " --jsonfile Determines the SDK version from a user specified global.json file." echo " Note: global.json must have a value for 'SDK:Version'" + echo " --keep-zip,-KeepZip If set, downloaded file is kept." + echo " --zip-path, -ZipPath If set, downloaded file is stored at the specified path." echo " -?,--?,-h,--help,-Help Shows this help message" echo "" echo "Install Location:" diff --git a/src/git-lfs/README.md b/src/git-lfs/README.md index 9a9a06659..07f671755 100644 --- a/src/git-lfs/README.md +++ b/src/git-lfs/README.md @@ -17,6 +17,7 @@ Installs Git Large File Support (Git LFS) along with needed dependencies. 
Useful |-----|-----|-----|-----| | version | Select version of Git LFS to install | string | latest | | autoPull | Automatically pull LFS files when creating the container. When false, running 'git lfs pull' in the container will have the same effect. | boolean | true | +| installDirectlyFromGitHubRelease | Installs 'git-lfs' from GitHub releases instead of package manager feeds | boolean | false | diff --git a/src/git-lfs/devcontainer-feature.json b/src/git-lfs/devcontainer-feature.json index dc61dead9..fe23e02cf 100644 --- a/src/git-lfs/devcontainer-feature.json +++ b/src/git-lfs/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "git-lfs", - "version": "1.1.0", + "version": "1.2.5", "name": "Git Large File Support (LFS)", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/git-lfs", "description": "Installs Git Large File Support (Git LFS) along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like git and curl.", @@ -18,10 +18,26 @@ "type": "boolean", "default": true, "description": "Automatically pull LFS files when creating the container. When false, running 'git lfs pull' in the container will have the same effect." + }, + "installDirectlyFromGitHubRelease": { + "type": "boolean", + "default": false, + "description": "Installs 'git-lfs' from GitHub releases instead of package manager feeds" + } + }, + "customizations": { + "vscode": { + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes Git Large File Support (Git LFS) along with needed dependencies pre-installed and available on the `PATH`." 
+ } + ] + } } }, "postCreateCommand": "/usr/local/share/pull-git-lfs-artifacts.sh", "installsAfter": [ "ghcr.io/devcontainers/features/common-utils" ] -} +} \ No newline at end of file diff --git a/src/git-lfs/install.sh b/src/git-lfs/install.sh index 393109995..71066c7b3 100755 --- a/src/git-lfs/install.sh +++ b/src/git-lfs/install.sh @@ -9,14 +9,12 @@ GIT_LFS_VERSION=${VERSION:-"latest"} AUTO_PULL=${AUTOPULL:="true"} +INSTALL_WITH_GITHUB=${INSTALLDIRECTLYFROMGITHUBRELEASE:="false"} GIT_LFS_ARCHIVE_GPG_KEY_URI="https://packagecloud.io/github/git-lfs/gpgkey" GIT_LFS_ARCHIVE_ARCHITECTURES="amd64 arm64" GIT_LFS_ARCHIVE_VERSION_CODENAMES="stretch buster bullseye bionic focal jammy" GIT_LFS_CHECKSUM_GPG_KEYS="0x88ace9b29196305ba9947552f1ba225c0223b187 0x86cd3297749375bcf8206715f54fe648088335a9 0xaa3b3450295830d2de6db90caba67be5a5795889" -GPG_KEY_SERVERS="keyserver hkp://keyserver.ubuntu.com -keyserver hkps://keys.openpgp.org -keyserver hkp://keyserver.pgp.com" set -e @@ -62,15 +60,56 @@ find_version_from_git_tags() { echo "${variable_name}=${!variable_name}" } +# Get the list of GPG key servers that are reachable +get_gpg_key_servers() { + local curl_args="" + local keyserver_reachable=false # Flag to indicate if any keyserver is reachable + + if [ ! -z "${KEYSERVER_PROXY}" ]; then + curl_args="--proxy ${KEYSERVER_PROXY}" + fi + + test_keyserver() { + local keyserver="$1" + local keyserver_curl_url="$2" + if curl -s ${curl_args} --max-time 5 "${keyserver_curl_url}" > /dev/null; then + echo "keyserver ${keyserver}" + keyserver_reachable=true + else + echo "(*) Keyserver ${keyserver} is not reachable." 
>&2 + fi + } + + # Explicitly test these in order because Bash v4.4.20 (Ubuntu Bionic) + # enumerates associative array keys in a different order than Bash v5 + test_keyserver "hkp://keyserver.ubuntu.com" "http://keyserver.ubuntu.com:11371" + test_keyserver "hkp://keyserver.ubuntu.com:80" "http://keyserver.ubuntu.com" + test_keyserver "hkp://keyserver.pgp.com" "http://keyserver.pgp.com:11371" + # Test this server last because keys.openpgp.org strips user IDs from keys unless + # the owner gives permission, which causes gpg in Ubuntu Bionic to reject the key + # (https://github.com/devcontainers/features/issues/1055) + test_keyserver "hkps://keys.openpgp.org" "https://keys.openpgp.org" + + if ! $keyserver_reachable; then + echo "(!) No keyserver is reachable." >&2 + exit 1 + fi +} + # Import the specified key in a variable name passed in as receive_gpg_keys() { local keys=${!1} + # Install curl + if ! type curl > /dev/null 2>&1; then + check_packages curl + fi + # Use a temporary location for gpg keys to avoid polluting image export GNUPGHOME="/tmp/tmp-gnupg" mkdir -p ${GNUPGHOME} chmod 700 ${GNUPGHOME} - echo -e "disable-ipv6\n${GPG_KEY_SERVERS}" > ${GNUPGHOME}/dirmngr.conf + echo -e "disable-ipv6\n$(get_gpg_key_servers)" > ${GNUPGHOME}/dirmngr.conf # GPG key download sometimes fails for some reason and retrying fixes it. local retry_count=0 local gpg_ok="false" @@ -80,7 +119,7 @@ receive_gpg_keys() { echo "(*) Downloading GPG key..." ( echo "${keys}" | xargs -n 1 gpg --recv-keys) 2>&1 && gpg_ok="true" if [ "${gpg_ok}" != "true" ]; then - echo "(*) Failed getting key, retring in 10s..." + echo "(*) Failed getting key, retrying in 10s..." 
(( retry_count++ )) sleep 10s fi @@ -129,14 +168,34 @@ install_using_apt() { git-lfs install --skip-repo } +# Function to fetch the version released prior to the latest version +get_previous_version() { + repo_url=$1 + curl -s "$repo_url" | jq -r 'del(.[].assets) | .[0].tag_name' +} + +install_from_release() { + git_lfs_filename="git-lfs-linux-${architecture}-v${GIT_LFS_VERSION}.tar.gz" + echo "Looking for release artfact: ${git_lfs_filename}" + curl -sSL -o "${git_lfs_filename}" "https://github.com/git-lfs/git-lfs/releases/download/v${GIT_LFS_VERSION}/${git_lfs_filename}" +} + install_using_github() { echo "(*) No apt package for ${VERSION_CODENAME} ${architecture}. Installing manually." mkdir -p /tmp/git-lfs cd /tmp/git-lfs find_version_from_git_tags GIT_LFS_VERSION "https://github.com/git-lfs/git-lfs" - git_lfs_filename="git-lfs-linux-${architecture}-v${GIT_LFS_VERSION}.tar.gz" - echo "Looking for release artfact: ${git_lfs_filename}" - curl -sSL -o "${git_lfs_filename}" "https://github.com/git-lfs/git-lfs/releases/download/v${GIT_LFS_VERSION}/${git_lfs_filename}" + install_from_release + + if grep -q "Not Found" "${git_lfs_filename}"; then + echo -e "\n(!) Failed to fetch the latest artifacts for Git lfs v${GIT_LFS_VERSION}..." + repo_url=https://api.github.com/repos/git-lfs/git-lfs/releases + requested_version=$(get_previous_version "${repo_url}") + echo -e "\nAttempting to install ${requested_version}" + GIT_LFS_VERSION=${requested_version#v} + install_from_release + fi + # Verify file curl -sSL -o "sha256sums.asc" "https://github.com/git-lfs/git-lfs/releases/download/v${GIT_LFS_VERSION}/sha256sums.asc" receive_gpg_keys GIT_LFS_CHECKSUM_GPG_KEYS @@ -164,7 +223,7 @@ export DEBIAN_FRONTEND=noninteractive # Install git, curl, gpg, dirmngr and debian-archive-keyring if missing . /etc/os-release -check_packages curl ca-certificates gnupg2 dirmngr apt-transport-https +check_packages curl ca-certificates gnupg2 dirmngr apt-transport-https jq if ! 
type git > /dev/null 2>&1; then check_packages git fi @@ -175,14 +234,14 @@ fi # Install Git LFS echo "Installing Git LFS..." architecture="$(dpkg --print-architecture)" -if [[ "${GIT_LFS_ARCHIVE_ARCHITECTURES}" = *"${architecture}"* ]] && [[ "${GIT_LFS_ARCHIVE_VERSION_CODENAMES}" = *"${VERSION_CODENAME}"* ]]; then - install_using_apt || use_github="true" +if [[ "${GIT_LFS_ARCHIVE_ARCHITECTURES}" = *"${architecture}"* ]] && [[ "${GIT_LFS_ARCHIVE_VERSION_CODENAMES}" = *"${VERSION_CODENAME}"* ]] && [[ "${INSTALL_WITH_GITHUB}" = "false" ]]; then + install_using_apt || INSTALL_WITH_GITHUB="true" else - use_github="true" + INSTALL_WITH_GITHUB="true" fi # If no archive exists or apt install fails, try direct from github -if [ "${use_github}" = "true" ]; then +if [ "${INSTALL_WITH_GITHUB}" = "true" ]; then install_using_github fi @@ -211,7 +270,7 @@ if ! git lfs ls-files > /dev/null 2>&1; then echo "(!) Skipping automatic 'git lfs pull' because no git lfs files were detected" exit 0 fi - +git lfs install git lfs pull EOF diff --git a/src/git/NOTES.md b/src/git/NOTES.md index 19fe92f31..507ba8e8b 100644 --- a/src/git/NOTES.md +++ b/src/git/NOTES.md @@ -2,6 +2,6 @@ ## OS Support -This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. +This Feature should work on recent versions of Alpine, Debian/Ubuntu, RedHat Enterprise Linux, Fedora, Alma, and RockyLinux distributions with the `apk`, `apt`, `yum`, `dnf`, or `microdnf` package manager installed. `bash` is required to execute the `install.sh` script. diff --git a/src/git/README.md b/src/git/README.md index a9e54fa18..e750bbed2 100644 --- a/src/git/README.md +++ b/src/git/README.md @@ -16,13 +16,13 @@ Install an up-to-date version of Git, built from source as needed. Useful for wh | Options Id | Description | Type | Default Value | |-----|-----|-----|-----| | version | Select or enter a Git version. 
| string | os-provided | -| ppa | Install from PPA if available | boolean | true | +| ppa | Install from PPA if available (only supported for Ubuntu distributions) | boolean | true | ## OS Support -This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. +This Feature should work on recent versions of Alpine, Debian/Ubuntu, RedHat Enterprise Linux, Fedora, Alma, and RockyLinux distributions with the `apk`, `apt`, `yum`, `dnf`, or `microdnf` package manager installed. `bash` is required to execute the `install.sh` script. diff --git a/src/git/devcontainer-feature.json b/src/git/devcontainer-feature.json index 23e1c04f4..7531ad2ab 100644 --- a/src/git/devcontainer-feature.json +++ b/src/git/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "git", - "version": "1.1.5", + "version": "1.3.4", "name": "Git (from source)", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/git", "description": "Install an up-to-date version of Git, built from source as needed. Useful for when you want the latest and greatest features. Auto-detects latest stable version and installs needed dependencies.", @@ -9,6 +9,7 @@ "type": "string", "proposals": [ "latest", + "system", "os-provided" ], "default": "os-provided", @@ -17,7 +18,18 @@ "ppa": { "type": "boolean", "default": true, - "description": "Install from PPA if available" + "description": "Install from PPA if available (only supported for Ubuntu distributions)" + } + }, + "customizations": { + "vscode": { + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes an up-to-date version of Git, built from source as needed, pre-installed and available on the `PATH`." 
+ } + ] + } } }, "installsAfter": [ diff --git a/src/git/install.sh b/src/git/install.sh index 7bfa490e7..4124fc80c 100755 --- a/src/git/install.sh +++ b/src/git/install.sh @@ -10,22 +10,105 @@ GIT_VERSION=${VERSION} # 'system' checks the base image first, else installs 'latest' USE_PPA_IF_AVAILABLE=${PPA} -GIT_CORE_PPA_ARCHIVE_GPG_KEY=E1DD270288B4E6030699E45FA1715D88E1DF1F24 -GPG_KEY_SERVERS="keyserver hkp://keyserver.ubuntu.com -keyserver hkps://keys.openpgp.org -keyserver hkp://keyserver.pgp.com" - -set -e - -# Clean up -rm -rf /var/lib/apt/lists/* +GIT_CORE_PPA_ARCHIVE_GPG_KEY=F911AB184317630C59970973E363C90F8F1B6217 if [ "$(id -u)" -ne 0 ]; then echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' exit 1 fi -# Import the specified key in a variable name passed in as +# Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME +. /etc/os-release +# Get an adjusted ID independent of distro variants +if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then + ADJUSTED_ID="debian" +elif [ "${ID}" = "alpine" ]; then + ADJUSTED_ID="alpine" +elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then + ADJUSTED_ID="rhel" + VERSION_CODENAME="${ID}${VERSION_ID}" +else + echo "Linux distro ${ID} not supported." + exit 1 +fi + +if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then + # As of 1 July 2024, mirrorlist.centos.org no longer exists. + # Update the repo files to reference vault.centos.org. 
+ sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo + sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo + sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo +fi + +if type apt-get > /dev/null 2>&1; then + INSTALL_CMD=apt-get +elif type apk > /dev/null 2>&1; then + INSTALL_CMD=apk +elif type microdnf > /dev/null 2>&1; then + INSTALL_CMD=microdnf +elif type dnf > /dev/null 2>&1; then + INSTALL_CMD=dnf +elif type yum > /dev/null 2>&1; then + INSTALL_CMD=yum +else + echo "(Error) Unable to find a supported package manager." + exit 1 +fi + +# Clean up +clean_up() { + case $ADJUSTED_ID in + debian) + rm -rf /var/lib/apt/lists/* + ;; + alpine) + rm -rf /var/cache/apk/* + ;; + rhel) + rm -rf /var/cache/dnf/* + rm -rf /var/cache/yum/* + ;; + esac +} +clean_up + +# Get the list of GPG key servers that are reachable +get_gpg_key_servers() { + local curl_args="" + local keyserver_reachable=false # Flag to indicate if any keyserver is reachable + + if [ ! -z "${KEYSERVER_PROXY}" ]; then + curl_args="--proxy ${KEYSERVER_PROXY}" + fi + + test_keyserver() { + local keyserver="$1" + local keyserver_curl_url="$2" + if curl -s ${curl_args} --max-time 5 "${keyserver_curl_url}" > /dev/null; then + echo "keyserver ${keyserver}" + keyserver_reachable=true + else + echo "(*) Keyserver ${keyserver} is not reachable." 
>&2 + fi + } + + # Explicitly test these in order because Bash v4.4.20 (Ubuntu Bionic) + # enumerates associative array keys in a different order than Bash v5 + test_keyserver "hkp://keyserver.ubuntu.com" "http://keyserver.ubuntu.com:11371" + test_keyserver "hkp://keyserver.ubuntu.com:80" "http://keyserver.ubuntu.com" + test_keyserver "hkp://keyserver.pgp.com" "http://keyserver.pgp.com:11371" + # Test this server last because keys.openpgp.org strips user IDs from keys unless + # the owner gives permission, which causes gpg in Ubuntu Bionic to reject the key + # (https://github.com/devcontainers/features/issues/1055) + test_keyserver "hkps://keys.openpgp.org" "https://keys.openpgp.org" + + if ! $keyserver_reachable; then + echo "(!) No keyserver is reachable." >&2 + exit 1 + fi +} + +# Import the specified key in a variable name passed in as receive_gpg_keys() { local keys=${!1} local keyring_args="" @@ -34,21 +117,26 @@ receive_gpg_keys() { keyring_args="--no-default-keyring --keyring $2" fi + # Install curl + if ! type curl > /dev/null 2>&1; then + check_packages curl + fi + # Use a temporary location for gpg keys to avoid polluting image export GNUPGHOME="/tmp/tmp-gnupg" mkdir -p ${GNUPGHOME} chmod 700 ${GNUPGHOME} - echo -e "disable-ipv6\n${GPG_KEY_SERVERS}" > ${GNUPGHOME}/dirmngr.conf + echo -e "disable-ipv6\n$(get_gpg_key_servers)" > ${GNUPGHOME}/dirmngr.conf # GPG key download sometimes fails for some reason and retrying fixes it. local retry_count=0 local gpg_ok="false" set +e - until [ "${gpg_ok}" = "true" ] || [ "${retry_count}" -eq "5" ]; + until [ "${gpg_ok}" = "true" ] || [ "${retry_count}" -eq "5" ]; do echo "(*) Downloading GPG key..." ( echo "${keys}" | xargs -n 1 gpg -q ${keyring_args} --recv-keys) 2>&1 && gpg_ok="true" if [ "${gpg_ok}" != "true" ]; then - echo "(*) Failed getting key, retring in 10s..." + echo "(*) Failed getting key, retrying in 10s..." 
(( retry_count++ )) sleep 10s fi @@ -60,40 +148,84 @@ receive_gpg_keys() { fi } -apt_get_update() -{ - if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then - echo "Running apt-get update..." - apt-get update -y +pkg_mgr_update() { + if [ ${INSTALL_CMD} = "apt-get" ]; then + if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then + echo "Running apt-get update..." + ${INSTALL_CMD} update -y + fi + elif [ ${INSTALL_CMD} = "apk" ]; then + if [ "$(find /var/cache/apk/* | wc -l)" = "0" ]; then + echo "Running apk update..." + ${INSTALL_CMD} update + fi + elif [ ${INSTALL_CMD} = "dnf" ] || [ ${INSTALL_CMD} = "yum" ]; then + if [ "$(find /var/cache/${INSTALL_CMD}/* | wc -l)" = "0" ]; then + echo "Running ${INSTALL_CMD} check-update ..." + ${INSTALL_CMD} check-update + fi fi } + # Checks if packages are installed and installs them if not check_packages() { - if ! dpkg -s "$@" > /dev/null 2>&1; then - apt_get_update - apt-get -y install --no-install-recommends "$@" + if [ ${INSTALL_CMD} = "apt-get" ]; then + if ! dpkg -s "$@" > /dev/null 2>&1; then + pkg_mgr_update + ${INSTALL_CMD} -y install --no-install-recommends "$@" + fi + elif [ ${INSTALL_CMD} = "apk" ]; then + ${INSTALL_CMD} add \ + --no-cache \ + "$@" + elif [ ${INSTALL_CMD} = "dnf" ] || [ ${INSTALL_CMD} = "yum" ]; then + _num_pkgs=$(echo "$@" | tr ' ' \\012 | wc -l) + _num_installed=$(${INSTALL_CMD} -C list installed "$@" | sed '1,/^Installed/d' | wc -l) + if [ ${_num_pkgs} != ${_num_installed} ]; then + pkg_mgr_update + ${INSTALL_CMD} -y install "$@" + fi + elif [ ${INSTALL_CMD} = "microdnf" ]; then + ${INSTALL_CMD} -y install \ + --refresh \ + --best \ + --nodocs \ + --noplugins \ + --setopt=install_weak_deps=0 \ + "$@" + else + echo "Linux distro ${ID} not supported." + exit 1 fi } export DEBIAN_FRONTEND=noninteractive -# Source /etc/os-release to get OS info -. /etc/os-release +# Debian / Ubuntu packages # If the os provided version is "good enough", just install that. 
if [ ${GIT_VERSION} = "os-provided" ] || [ ${GIT_VERSION} = "system" ]; then if type git > /dev/null 2>&1; then echo "Detected existing system install: $(git version)" # Clean up - rm -rf /var/lib/apt/lists/* + clean_up exit 0 fi - echo "Installing git from OS apt repository" + if [ "$INSTALL_CMD" = "apt-get" ]; then + echo "Installing git from OS apt repository" + elif [ "$INSTALL_CMD" = "apk" ]; then + echo "Installing git from OS apk repository" + else + echo "Installing git from OS yum/dnf repository" + fi + if [ $ID = "mariner" ]; then + check_packages ca-certificates + fi check_packages git # Clean up - rm -rf /var/lib/apt/lists/* + clean_up exit 0 fi @@ -103,15 +235,55 @@ if ([ "${GIT_VERSION}" = "latest" ] || [ "${GIT_VERSION}" = "lts" ] || [ "${GIT_ check_packages apt-transport-https curl ca-certificates gnupg2 dirmngr receive_gpg_keys GIT_CORE_PPA_ARCHIVE_GPG_KEY /usr/share/keyrings/gitcoreppa-archive-keyring.gpg echo -e "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/gitcoreppa-archive-keyring.gpg] http://ppa.launchpad.net/git-core/ppa/ubuntu ${VERSION_CODENAME} main\ndeb-src [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/gitcoreppa-archive-keyring.gpg] http://ppa.launchpad.net/git-core/ppa/ubuntu ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/git-core-ppa.list - apt-get update - apt-get -y install --no-install-recommends git + ${INSTALL_CMD} update + ${INSTALL_CMD} -y install --no-install-recommends git rm -rf "/tmp/tmp-gnupg" rm -rf /var/lib/apt/lists/* exit 0 fi # Install required packages to build if missing -check_packages build-essential curl ca-certificates tar gettext libssl-dev zlib1g-dev libcurl?-openssl-dev libexpat1-dev +if [ "${ADJUSTED_ID}" = "debian" ]; then + + check_packages build-essential curl ca-certificates tar gettext libssl-dev zlib1g-dev libcurl?-openssl-dev libexpat1-dev + + check_packages libpcre2-dev + + if [ "${VERSION_CODENAME}" = "focal" ] || [ "${VERSION_CODENAME}" = "bullseye" 
]; then + check_packages libpcre2-posix2 + elif [ "${VERSION_CODENAME}" = "bionic" ] || [ "${VERSION_CODENAME}" = "buster" ]; then + check_packages libpcre2-posix0 + else + check_packages libpcre2-posix3 + fi + +elif [ "${ADJUSTED_ID}" = "alpine" ]; then + + # update build dependencies + ${INSTALL_CMD} add --no-cache --update curl grep make zlib-dev + + # ref. + check_packages asciidoc curl-dev expat-dev g++ gcc openssl-dev pcre2-dev perl-dev perl-error python3-dev tcl tk xmlto + +elif [ "${ADJUSTED_ID}" = "rhel" ]; then + check_packages gcc libcurl-devel expat-devel gettext-devel openssl-devel perl-devel zlib-devel cmake pcre2-devel tar gzip ca-certificates + if ! type curl > /dev/null 2>&1; then + check_packages curl + fi + if ! type cmp > /dev/null 2>&1; then + check_packages diffutils + fi + if ! type awk > /dev/null 2>&1; then + check_packages gawk + fi + if [ $ID = "mariner" ]; then + check_packages glibc-devel kernel-headers binutils + fi + +else + echo "Linux distro ${ID} not supported." + exit 1 +fi # Partial version matching if [ "$(echo "${GIT_VERSION}" | grep -o '\.' | wc -l)" != "2" ]; then @@ -130,21 +302,19 @@ if [ "$(echo "${GIT_VERSION}" | grep -o '\.' | wc -l)" != "2" ]; then fi fi -check_packages libpcre2-dev - -if [ "${VERSION_CODENAME}" = "focal" ] || [ "${VERSION_CODENAME}" = "bullseye" ]; then - check_packages libpcre2-posix2 -elif [ "${VERSION_CODENAME}" = "bionic" ] || [ "${VERSION_CODENAME}" = "buster" ]; then - check_packages libpcre2-posix0 -else - check_packages libpcre2-posix3 -fi - echo "Downloading source for ${GIT_VERSION}..." curl -sL https://github.com/git/git/archive/v${GIT_VERSION}.tar.gz | tar -xzC /tmp 2>&1 echo "Building..." 
cd /tmp/git-${GIT_VERSION} -make -s USE_LIBPCRE=YesPlease prefix=/usr/local sysconfdir=/etc all && make -s USE_LIBPCRE=YesPlease prefix=/usr/local sysconfdir=/etc install 2>&1 +git_options=("prefix=/usr/local") +git_options+=("sysconfdir=/etc") +git_options+=("USE_LIBPCRE=YesPlease") +if [ "${ADJUSTED_ID}" = "alpine" ]; then + # ref. + git_options+=("NO_REGEX=YesPlease") + git_options+=("NO_GETTEXT=YesPlease") +fi +make -s "${git_options[@]}" all && make -s "${git_options[@]}" install 2>&1 rm -rf /tmp/git-${GIT_VERSION} -rm -rf /var/lib/apt/lists/* +clean_up echo "Done!" diff --git a/src/github-cli/devcontainer-feature.json b/src/github-cli/devcontainer-feature.json index 24ef9802f..b3eca81f0 100644 --- a/src/github-cli/devcontainer-feature.json +++ b/src/github-cli/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "github-cli", - "version": "1.0.10", + "version": "1.0.15", "name": "GitHub CLI", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/github-cli", "description": "Installs the GitHub CLI. Auto-detects latest version and installs needed dependencies.", @@ -19,6 +19,17 @@ "default": true } }, + "customizations": { + "vscode": { + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes the GitHub CLI (`gh`), which is pre-installed and available on the `PATH`. IMPORTANT: `gh api -f` does not support object values, use multiple `-f` flags with hierarchical keys and string values instead. When using GitHub actions `actions/upload-artifact` or `actions/download-artifact` use v4 or later." 
+ } + ] + } + } + }, "installsAfter": [ "ghcr.io/devcontainers/features/common-utils", "ghcr.io/devcontainers/features/git" diff --git a/src/github-cli/install.sh b/src/github-cli/install.sh index c1dd6924e..11af21d08 100755 --- a/src/github-cli/install.sh +++ b/src/github-cli/install.sh @@ -11,9 +11,6 @@ CLI_VERSION=${VERSION:-"latest"} INSTALL_DIRECTLY_FROM_GITHUB_RELEASE=${INSTALLDIRECTLYFROMGITHUBRELEASE:-"true"} GITHUB_CLI_ARCHIVE_GPG_KEY=23F3D4EA75716059 -GPG_KEY_SERVERS="keyserver hkp://keyserver.ubuntu.com -keyserver hkps://keys.openpgp.org -keyserver hkp://keyserver.pgp.com" set -e @@ -25,6 +22,37 @@ if [ "$(id -u)" -ne 0 ]; then exit 1 fi +# Get the list of GPG key servers that are reachable +get_gpg_key_servers() { + declare -A keyservers_curl_map=( + ["hkp://keyserver.ubuntu.com"]="http://keyserver.ubuntu.com:11371" + ["hkp://keyserver.ubuntu.com:80"]="http://keyserver.ubuntu.com" + ["hkps://keys.openpgp.org"]="https://keys.openpgp.org" + ["hkp://keyserver.pgp.com"]="http://keyserver.pgp.com:11371" + ) + + local curl_args="" + local keyserver_reachable=false # Flag to indicate if any keyserver is reachable + + if [ ! -z "${KEYSERVER_PROXY}" ]; then + curl_args="--proxy ${KEYSERVER_PROXY}" + fi + + for keyserver in "${!keyservers_curl_map[@]}"; do + local keyserver_curl_url="${keyservers_curl_map[${keyserver}]}" + if curl -s ${curl_args} --max-time 5 ${keyserver_curl_url} > /dev/null; then + echo "keyserver ${keyserver}" + keyserver_reachable=true + else + echo "(*) Keyserver ${keyserver} is not reachable." >&2 + fi + done + + if ! $keyserver_reachable; then + echo "(!) No keyserver is reachable." >&2 + exit 1 + fi +} # Import the specified key in a variable name passed in as receive_gpg_keys() { @@ -34,11 +62,16 @@ receive_gpg_keys() { keyring_args="--no-default-keyring --keyring $2" fi + # Install curl + if ! 
type curl > /dev/null 2>&1; then + check_packages curl + fi + # Use a temporary location for gpg keys to avoid polluting image export GNUPGHOME="/tmp/tmp-gnupg" mkdir -p ${GNUPGHOME} chmod 700 ${GNUPGHOME} - echo -e "disable-ipv6\n${GPG_KEY_SERVERS}" > ${GNUPGHOME}/dirmngr.conf + echo -e "disable-ipv6\n$(get_gpg_key_servers)" > ${GNUPGHOME}/dirmngr.conf # GPG key download sometimes fails for some reason and retrying fixes it. local retry_count=0 local gpg_ok="false" @@ -48,7 +81,7 @@ receive_gpg_keys() { echo "(*) Downloading GPG key..." ( echo "${keys}" | xargs -n 1 gpg -q ${keyring_args} --recv-keys) 2>&1 && gpg_ok="true" if [ "${gpg_ok}" != "true" ]; then - echo "(*) Failed getting key, retring in 10s..." + echo "(*) Failed getting key, retrying in 10s..." (( retry_count++ )) sleep 10s fi @@ -162,14 +195,14 @@ install_deb_using_github() { mkdir -p /tmp/ghcli pushd /tmp/ghcli - wget https://github.com/cli/cli/releases/download/v${CLI_VERSION}/${cli_filename} + wget -q --show-progress --progress=dot:giga https://github.com/cli/cli/releases/download/v${CLI_VERSION}/${cli_filename} exit_code=$? set -e if [ "$exit_code" != "0" ]; then # Handle situation where git tags are ahead of what was is available to actually download echo "(!) github-cli version ${CLI_VERSION} failed to download. Attempting to fall back one version to retry..." find_prev_version_from_git_tags CLI_VERSION https://github.com/cli/cli - wget https://github.com/cli/cli/releases/download/v${CLI_VERSION}/${cli_filename} + wget -q --show-progress --progress=dot:giga https://github.com/cli/cli/releases/download/v${CLI_VERSION}/${cli_filename} fi dpkg -i /tmp/ghcli/${cli_filename} diff --git a/src/go/NOTES.md b/src/go/NOTES.md index 19fe92f31..79a308cf5 100644 --- a/src/go/NOTES.md +++ b/src/go/NOTES.md @@ -2,6 +2,6 @@ ## OS Support -This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. 
+This Feature should work on recent versions of Debian/Ubuntu, RedHat Enterprise Linux, Fedora, Alma, and RockyLinux distributions with the apt, yum, dnf, or microdnf package manager installed. `bash` is required to execute the `install.sh` script. diff --git a/src/go/README.md b/src/go/README.md index be035ee85..5f35da948 100644 --- a/src/go/README.md +++ b/src/go/README.md @@ -28,7 +28,7 @@ Installs Go and common Go utilities. Auto-detects latest version and installs ne ## OS Support -This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. +This Feature should work on recent versions of Debian/Ubuntu, RedHat Enterprise Linux, Fedora, Alma, and RockyLinux distributions with the apt, yum, dnf, or microdnf package manager installed. `bash` is required to execute the `install.sh` script. diff --git a/src/go/devcontainer-feature.json b/src/go/devcontainer-feature.json index 9cd89b26c..f98e65a7f 100644 --- a/src/go/devcontainer-feature.json +++ b/src/go/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "go", - "version": "1.2.1", + "version": "1.3.2", "name": "Go", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/go", "description": "Installs Go and common Go utilities. Auto-detects latest version and installs needed dependencies.", @@ -10,8 +10,8 @@ "proposals": [ "latest", "none", - "1.21", - "1.20" + "1.24", + "1.23" ], "default": "latest", "description": "Select or enter a Go version to install" @@ -27,7 +27,14 @@ "vscode": { "extensions": [ "golang.Go" - ] + ], + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes Go and common Go utilities pre-installed and available on the `PATH`, along with the Go language extension for Go development." 
+ } + ] + } } }, "containerEnv": { diff --git a/src/go/install.sh b/src/go/install.sh index f79fc6f3d..85fea5dc4 100755 --- a/src/go/install.sh +++ b/src/go/install.sh @@ -20,36 +20,68 @@ GO_GPG_KEY_URI="https://dl.google.com/linux/linux_signing_key.pub" set -e -# Clean up -rm -rf /var/lib/apt/lists/* - if [ "$(id -u)" -ne 0 ]; then echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' exit 1 fi -# Ensure that login shells get the correct path if the user updated the PATH using ENV. -rm -f /etc/profile.d/00-restore-env.sh -echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh -chmod +x /etc/profile.d/00-restore-env.sh - -# Determine the appropriate non-root user -if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then - USERNAME="" - POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)") - for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do - if id -u ${CURRENT_USER} > /dev/null 2>&1; then - USERNAME=${CURRENT_USER} - break - fi - done - if [ "${USERNAME}" = "" ]; then - USERNAME=root +# Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME +. /etc/os-release +# Get an adjusted ID independent of distro variants +MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . -f 1) +if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then + ADJUSTED_ID="debian" +elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then + ADJUSTED_ID="rhel" + if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then + VERSION_CODENAME="rhel${MAJOR_VERSION_ID}" + else + VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}" fi -elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then - USERNAME=root +else + echo "Linux distro ${ID} not supported." 
+ exit 1 +fi + +if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then + # As of 1 July 2024, mirrorlist.centos.org no longer exists. + # Update the repo files to reference vault.centos.org. + sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo + sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo + sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo +fi + +# Setup INSTALL_CMD & PKG_MGR_CMD +if type apt-get > /dev/null 2>&1; then + PKG_MGR_CMD=apt-get + INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends" +elif type microdnf > /dev/null 2>&1; then + PKG_MGR_CMD=microdnf + INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0" +elif type dnf > /dev/null 2>&1; then + PKG_MGR_CMD=dnf + INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0" +else + PKG_MGR_CMD=yum + INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --noplugins --setopt=install_weak_deps=0" fi +# Clean up +clean_up() { + case ${ADJUSTED_ID} in + debian) + rm -rf /var/lib/apt/lists/* + ;; + rhel) + rm -rf /var/cache/dnf/* /var/cache/yum/* + rm -rf /tmp/yum.log + rm -rf ${GPG_INSTALL_PATH} + ;; + esac +} +clean_up + + # Figure out correct version of a three part version number is not passed find_version_from_git_tags() { local variable_name=$1 @@ -84,44 +116,108 @@ find_version_from_git_tags() { echo "${variable_name}=${!variable_name}" } -# Get central common setting -get_common_setting() { - if [ "${common_settings_file_loaded}" != "true" ]; then - curl -sfL "https://aka.ms/vscode-dev-containers/script-library/settings.env" 2>/dev/null -o /tmp/vsdc-settings.env || echo "Could not download settings file. Skipping." 
- common_settings_file_loaded=true - fi - if [ -f "/tmp/vsdc-settings.env" ]; then - local multi_line="" - if [ "$2" = "true" ]; then multi_line="-z"; fi - local result="$(grep ${multi_line} -oP "$1=\"?\K[^\"]+" /tmp/vsdc-settings.env | tr -d '\0')" - if [ ! -z "${result}" ]; then declare -g $1="${result}"; fi - fi - echo "$1=${!1}" -} - -apt_get_update() -{ - if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then - echo "Running apt-get update..." - apt-get update -y - fi +pkg_mgr_update() { + case $ADJUSTED_ID in + debian) + if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then + echo "Running apt-get update..." + ${PKG_MGR_CMD} update -y + fi + ;; + rhel) + if [ ${PKG_MGR_CMD} = "microdnf" ]; then + if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then + echo "Running ${PKG_MGR_CMD} makecache ..." + ${PKG_MGR_CMD} makecache + fi + else + if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then + echo "Running ${PKG_MGR_CMD} check-update ..." + set +e + ${PKG_MGR_CMD} check-update + rc=$? + if [ $rc != 0 ] && [ $rc != 100 ]; then + exit 1 + fi + set -e + fi + fi + ;; + esac } # Checks if packages are installed and installs them if not check_packages() { - if ! dpkg -s "$@" > /dev/null 2>&1; then - apt_get_update - apt-get -y install --no-install-recommends "$@" - fi + case ${ADJUSTED_ID} in + debian) + if ! dpkg -s "$@" > /dev/null 2>&1; then + pkg_mgr_update + ${INSTALL_CMD} "$@" + fi + ;; + rhel) + if ! rpm -q "$@" > /dev/null 2>&1; then + pkg_mgr_update + ${INSTALL_CMD} "$@" + fi + ;; + esac } +# Ensure that login shells get the correct path if the user updated the PATH using ENV. +rm -f /etc/profile.d/00-restore-env.sh +echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh +chmod +x /etc/profile.d/00-restore-env.sh + +# Some distributions do not install awk by default (e.g. Mariner) +if ! 
type awk >/dev/null 2>&1; then + check_packages awk +fi + +# Determine the appropriate non-root user +if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then + USERNAME="" + POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)") + for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do + if id -u ${CURRENT_USER} > /dev/null 2>&1; then + USERNAME=${CURRENT_USER} + break + fi + done + if [ "${USERNAME}" = "" ]; then + USERNAME=root + fi +elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then + USERNAME=root +fi + export DEBIAN_FRONTEND=noninteractive -# Install curl, tar, git, other dependencies if missing -check_packages curl ca-certificates gnupg2 tar g++ gcc libc6-dev make pkg-config +check_packages ca-certificates gnupg2 tar gcc make pkg-config + +if [ $ADJUSTED_ID = "debian" ]; then + check_packages g++ libc6-dev +else + check_packages gcc-c++ glibc-devel +fi +# Install curl, git, other dependencies if missing +if ! type curl > /dev/null 2>&1; then + check_packages curl +fi if ! type git > /dev/null 2>&1; then check_packages git fi +# Some systems, e.g. Mariner, still a few more packages +if ! type as > /dev/null 2>&1; then + check_packages binutils +fi +if ! [ -f /usr/include/linux/errno.h ]; then + check_packages kernel-headers +fi +# Minimal RHEL install may need findutils installed +if ! [ -f /usr/bin/find ]; then + check_packages findutils +fi # Get closest match for version number specified find_version_from_git_tags TARGET_GO_VERSION "https://go.googlesource.com/go" "tags/go" "." 
"true" @@ -143,12 +239,11 @@ fi usermod -a -G golang "${USERNAME}" mkdir -p "${TARGET_GOROOT}" "${TARGET_GOPATH}" -if [[ "${TARGET_GO_VERSION}" != "none" ]] && [[ "$(go version)" != *"${TARGET_GO_VERSION}"* ]]; then +if [[ "${TARGET_GO_VERSION}" != "none" ]] && [[ "$(go version 2>/dev/null)" != *"${TARGET_GO_VERSION}"* ]]; then # Use a temporary location for gpg keys to avoid polluting image export GNUPGHOME="/tmp/tmp-gnupg" mkdir -p ${GNUPGHOME} chmod 700 ${GNUPGHOME} - get_common_setting GO_GPG_KEY_URI curl -sSL -o /tmp/tmp-gnupg/golang_key "${GO_GPG_KEY_URI}" gpg -q --import /tmp/tmp-gnupg/golang_key echo "Downloading Go ${TARGET_GO_VERSION}..." @@ -246,6 +341,6 @@ find "${TARGET_GOROOT}" -type d -print0 | xargs -n 1 -0 chmod g+s find "${TARGET_GOPATH}" -type d -print0 | xargs -n 1 -0 chmod g+s # Clean up -rm -rf /var/lib/apt/lists/* +clean_up echo "Done!" diff --git a/src/hugo/devcontainer-feature.json b/src/hugo/devcontainer-feature.json index d6358d703..376826509 100644 --- a/src/hugo/devcontainer-feature.json +++ b/src/hugo/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "hugo", - "version": "1.1.2", + "version": "1.1.3", "name": "Hugo", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/hugo", "options": { @@ -22,6 +22,17 @@ "HUGO_DIR": "/usr/local/hugo", "PATH": "/usr/local/hugo/bin:${PATH}" }, + "customizations": { + "vscode": { + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes Hugo, a popular open-source static site generator written in Go, pre-installed and available on the `PATH`." 
+ } + ] + } + } + }, "installsAfter": [ "ghcr.io/devcontainers/features/common-utils" ] diff --git a/src/java/NOTES.md b/src/java/NOTES.md index 63622afaf..edc310e23 100644 --- a/src/java/NOTES.md +++ b/src/java/NOTES.md @@ -5,6 +5,6 @@ For the Java Feature from this repository, see [NOTICE.txt](https://github.com/d ## OS Support -This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. +Debian/Ubuntu, RedHat Enterprise Linux, Fedora, Alma, and RockyLinux distributions with the `apt`, `yum`, `dnf`, or `microdnf` package manager installed. `bash` is required to execute the `install.sh` script. diff --git a/src/java/README.md b/src/java/README.md index 1ffd6188b..1a2d91851 100644 --- a/src/java/README.md +++ b/src/java/README.md @@ -16,6 +16,7 @@ Installs Java, SDKMAN! (if not installed), and needed dependencies. | Options Id | Description | Type | Default Value | |-----|-----|-----|-----| | version | Select or enter a Java version to install | string | latest | +| additionalVersions | Enter additional Java versions, separated by commas. | string | - | | jdkDistro | Select or enter a JDK distribution | string | ms | | installGradle | Install Gradle, a build automation tool for multi-language software development | boolean | false | | gradleVersion | Select or enter a Gradle version | string | latest | @@ -23,6 +24,8 @@ Installs Java, SDKMAN! (if not installed), and needed dependencies. 
| mavenVersion | Select or enter a Maven version | string | latest | | installAnt | Install Ant, a software tool for automating software build processes | boolean | false | | antVersion | Select or enter an Ant version | string | latest | +| installGroovy | Install Groovy, powerful, optionally typed and dynamic language with static-typing and static compilation capabilities | boolean | false | +| groovyVersion | Select or enter a Groovy version | string | latest | ## Customizations @@ -37,7 +40,7 @@ For the Java Feature from this repository, see [NOTICE.txt](https://github.com/d ## OS Support -This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. +Debian/Ubuntu, RedHat Enterprise Linux, Fedora, Alma, and RockyLinux distributions with the `apt`, `yum`, `dnf`, or `microdnf` package manager installed. `bash` is required to execute the `install.sh` script. diff --git a/src/java/devcontainer-feature.json b/src/java/devcontainer-feature.json index 7bdec6ab0..4198af326 100644 --- a/src/java/devcontainer-feature.json +++ b/src/java/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "java", - "version": "1.2.1", + "version": "1.6.3", "name": "Java (via SDKMAN!)", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/java", "description": "Installs Java, SDKMAN! (if not installed), and needed dependencies.", @@ -10,6 +10,7 @@ "proposals": [ "latest", "none", + "21", "17", "11", "8" @@ -17,13 +18,19 @@ "default": "latest", "description": "Select or enter a Java version to install" }, + "additionalVersions": { + "type": "string", + "default": "", + "description": "Enter additional Java versions, separated by commas." 
+ }, "jdkDistro": { "type": "string", "proposals": [ "ms", "open", "oracle", - "tem" + "tem", + "amzn" ], "default": "ms", "description": "Select or enter a JDK distribution" @@ -74,6 +81,22 @@ ], "default": "latest", "description": "Select or enter an Ant version" + }, + "installGroovy": { + "type": "boolean", + "default": false, + "description": "Install Groovy, powerful, optionally typed and dynamic language with static-typing and static compilation capabilities" + }, + "groovyVersion": { + "type": "string", + "proposals": [ + "latest", + "2.5.22", + "3.0.19", + "4.0.16" + ], + "default": "latest", + "description": "Select or enter a Groovy version" } }, "customizations": { @@ -82,7 +105,12 @@ "vscjava.vscode-java-pack" ], "settings": { - "java.import.gradle.java.home": "/usr/local/sdkman/candidates/java/current" + "java.import.gradle.java.home": "/usr/local/sdkman/candidates/java/current", + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes Java, SDKMAN! and needed dependencies pre-installed and available on the `PATH`, along with the Java language extension pack for Java development." 
+ } + ] } } }, @@ -92,6 +120,6 @@ "PATH": "/usr/local/sdkman/bin:/usr/local/sdkman/candidates/java/current/bin:/usr/local/sdkman/candidates/gradle/current/bin:/usr/local/sdkman/candidates/maven/current/bin:/usr/local/sdkman/candidates/ant/current/bin:${PATH}" }, "installsAfter": [ - "ghcr.io/devcontainers/features/common-utils" + "ghcr.io/devcontainers/features/common-utils" ] -} +} \ No newline at end of file diff --git a/src/java/install.sh b/src/java/install.sh index ea43dd3e9..62fd39462 100644 --- a/src/java/install.sh +++ b/src/java/install.sh @@ -9,14 +9,16 @@ # # Syntax: ./java-debian.sh [JDK version] [SDKMAN_DIR] [non-root user] [Add to rc files flag] -JAVA_VERSION="${VERSION:-"lts"}" +JAVA_VERSION="${VERSION:-"latest"}" INSTALL_GRADLE="${INSTALLGRADLE:-"false"}" GRADLE_VERSION="${GRADLEVERSION:-"latest"}" INSTALL_MAVEN="${INSTALLMAVEN:-"false"}" MAVEN_VERSION="${MAVENVERSION:-"latest"}" INSTALL_ANT="${INSTALLANT:-"false"}" ANT_VERSION="${ANTVERSION:-"latest"}" -JDK_DISTRO="${JDKDISTRO}" +INSTALL_GROOVY="${INSTALLGROOVY:-"false"}" +GROOVY_VERSION="${GROOVYVERSION:-"latest"}" +JDK_DISTRO="${JDKDISTRO:-"ms"}" export SDKMAN_DIR="${SDKMAN_DIR:-"/usr/local/sdkman"}" USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}" @@ -28,14 +30,125 @@ ADDITIONAL_VERSIONS="${ADDITIONALVERSIONS:-""}" set -e -# Clean up -rm -rf /var/lib/apt/lists/* - if [ "$(id -u)" -ne 0 ]; then echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' exit 1 fi +# Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME +. /etc/os-release +# Get an adjusted ID independent of distro variants +MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . 
-f 1) +if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then + ADJUSTED_ID="debian" +elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then + ADJUSTED_ID="rhel" + if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then + VERSION_CODENAME="rhel${MAJOR_VERSION_ID}" + else + VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}" + fi +else + echo "Linux distro ${ID} not supported." + exit 1 +fi + +# Setup INSTALL_CMD & PKG_MGR_CMD +if type apt-get > /dev/null 2>&1; then + PKG_MGR_CMD=apt-get + INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends" +elif type microdnf > /dev/null 2>&1; then + PKG_MGR_CMD=microdnf + INSTALL_CMD="${PKG_MGR_CMD} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0" +elif type dnf > /dev/null 2>&1; then + PKG_MGR_CMD=dnf + INSTALL_CMD="${PKG_MGR_CMD} -y install" +elif type yum > /dev/null 2>&1; then + PKG_MGR_CMD=yum + INSTALL_CMD="${PKG_MGR_CMD} -y install" +else + echo "(Error) Unable to find a supported package manager." + exit 1 +fi + +pkg_manager_update() { + case $ADJUSTED_ID in + debian) + if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then + echo "Running apt-get update..." + ${PKG_MGR_CMD} update -y + fi + ;; + rhel) + if [ ${PKG_MGR_CMD} = "microdnf" ]; then + if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then + echo "Running ${PKG_MGR_CMD} makecache ..." + ${PKG_MGR_CMD} makecache + fi + else + if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then + echo "Running ${PKG_MGR_CMD} check-update ..." + set +e + stderr_messages=$(${PKG_MGR_CMD} -q check-update 2>&1) + rc=$? + # centos 7 sometimes returns a status of 100 when it apears to work. 
+ if [ $rc != 0 ] && [ $rc != 100 ]; then + echo "(Error) ${PKG_MGR_CMD} check-update produced the following error message(s):" + echo "${stderr_messages}" + exit 1 + fi + set -e + fi + fi + ;; + esac +} + +# Checks if packages are installed and installs them if not +check_packages() { + case ${ADJUSTED_ID} in + debian) + if ! dpkg -s "$@" > /dev/null 2>&1; then + pkg_manager_update + ${INSTALL_CMD} "$@" + fi + ;; + rhel) + if ! rpm -q "$@" > /dev/null 2>&1; then + pkg_manager_update + ${INSTALL_CMD} "$@" + fi + ;; + esac +} + +if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then + # As of 1 July 2024, mirrorlist.centos.org no longer exists. + # Update the repo files to reference vault.centos.org. + sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo + sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo + sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo + yum update -y + check_packages epel-release +fi + +# Clean up +clean_up() { + local pkg + case ${ADJUSTED_ID} in + debian) + rm -rf /var/lib/apt/lists/* + ;; + rhel) + for pkg in epel-release epel-release-latest packages-microsoft-prod; do + ${PKG_MGR_CMD} -y remove $pkg 2>/dev/null || /bin/true + done + rm -rf /var/cache/dnf/* /var/cache/yum/* + rm -f /etc/yum.repos.d/docker-ce.repo + ;; + esac +} + # Ensure that login shells get the correct path if the user updated the PATH using ENV. rm -f /etc/profile.d/00-restore-env.sh echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh @@ -59,41 +172,67 @@ elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then fi updaterc() { + local _bashrc + local _zshrc if [ "${UPDATE_RC}" = "true" ]; then - echo "Updating /etc/bash.bashrc and /etc/zsh/zshrc..." 
- if [[ "$(cat /etc/bash.bashrc)" != *"$1"* ]]; then - echo -e "$1" >> /etc/bash.bashrc + case $ADJUSTED_ID in + debian) + _bashrc=/etc/bash.bashrc + _zshrc=/etc/zsh/zshrc + ;; + rhel) + _bashrc=/etc/bashrc + _zshrc=/etc/zshrc + ;; + esac + echo "Updating ${_bashrc} and ${_zshrc}..." + if [[ "$(cat ${_bashrc})" != *"$1"* ]]; then + echo -e "$1" >> "${_bashrc}" fi - if [ -f "/etc/zsh/zshrc" ] && [[ "$(cat /etc/zsh/zshrc)" != *"$1"* ]]; then - echo -e "$1" >> /etc/zsh/zshrc + if [ -f "${_zshrc}" ] && [[ "$(cat ${_zshrc})" != *"$1"* ]]; then + echo -e "$1" >> "${_zshrc}" fi fi } -apt_get_update() -{ - if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then - echo "Running apt-get update..." - apt-get update -y - fi -} - -# Checks if packages are installed and installs them if not -check_packages() { - if ! dpkg -s "$@" > /dev/null 2>&1; then - apt_get_update - apt-get -y install --no-install-recommends "$@" +find_version_list() { + prefix="$1" + suffix="$2" + install_type=$3 + ifLts="$4" + version_list=$5 + java_ver=$6 + + check_packages jq + all_versions=$(curl -s https://api.adoptium.net/v3/info/available_releases) + if [ "${ifLts}" = "true" ]; then + major_version=$(echo "$all_versions" | jq -r '.most_recent_lts') + elif [ "${java_ver}" = "latest" ]; then + major_version=$(echo "$all_versions" | jq -r '.most_recent_feature_release') + else + major_version=$(echo "$java_ver" | cut -d '.' 
-f 1) fi -} - -# Use Microsoft JDK for everything but JDK 8 and 18 (unless specified differently with jdkDistro option) -get_jdk_distro() { - VERSION="$1" + + # Remove the hardcoded fallback as this fails for new jdk latest version released ex: 24 + # Related Issue: https://github.com/devcontainers/features/issues/1308 if [ "${JDK_DISTRO}" = "ms" ]; then - if echo "${VERSION}" | grep -E '^8([\s\.]|$)' > /dev/null 2>&1 || echo "${VERSION}" | grep -E '^18([\s\.]|$)' > /dev/null 2>&1; then + # Check if the requested version is available in the 'ms' distribution + echo "Check if OpenJDK is available for version ${major_version} for ${JDK_DISTRO} Distro" + available_versions=$(su ${USERNAME} -c ". ${SDKMAN_DIR}/bin/sdkman-init.sh && sdk list ${install_type} | grep ${JDK_DISTRO} | grep -oE '[0-9]+(\.[0-9]+(\.[0-9]+)?)?' | sort -u") + if echo "${available_versions}" | grep -q "^${major_version}"; then + echo "JDK version ${major_version} is available in ${JDK_DISTRO}..." + else + echo "JDK version ${major_version} not available in ${JDK_DISTRO}.... Switching to (tem)." JDK_DISTRO="tem" fi fi + echo "JDK_DISTRO: ${JDK_DISTRO}" + if [ "${install_type}" != "java" ]; then + regex="${prefix}\\K[0-9]+\\.?[0-9]*\\.?[0-9]*${suffix}" + else + regex="${prefix}\\K${major_version}\\.?[0-9]*\\.?[0-9]*${suffix}${JDK_DISTRO}\\s*" + fi + declare -g ${version_list}="$(su ${USERNAME} -c ". 
\${SDKMAN_DIR}/bin/sdkman-init.sh && sdk list ${install_type} 2>&1 | grep -oP \"${regex}\" | tr -d ' ' | sort -rV")" } # Use SDKMAN to install something using a partial version match @@ -104,20 +243,25 @@ sdk_install() { local suffix="${4:-"\\s*"}" local full_version_check=${5:-".*-[a-z]+"} local set_as_default=${6:-"true"} + pkgs=("maven" "gradle" "ant" "groovy") + pkg_vals="${pkgs[@]}" if [ "${requested_version}" = "none" ]; then return; fi - # Blank will install latest stable version SDKMAN has - if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "lts" ] || [ "${requested_version}" = "default" ]; then - requested_version="" + if [ "${requested_version}" = "default" ]; then + requested_version="" + elif [[ "${pkg_vals}" =~ "${install_type}" ]] && [ "${requested_version}" = "latest" ]; then + requested_version="" + elif [ "${requested_version}" = "lts" ]; then + find_version_list "$prefix" "$suffix" "$install_type" "true" version_list "${requested_version}" + requested_version="$(echo "${version_list}" | head -n 1)" elif echo "${requested_version}" | grep -oE "${full_version_check}" > /dev/null 2>&1; then echo "${requested_version}" - else - local regex="${prefix}\\K[0-9]+\\.[0-9]+\\.[0-9]+${suffix}" - local version_list=$(su ${USERNAME} -c ". \${SDKMAN_DIR}/bin/sdkman-init.sh && sdk list ${install_type} 2>&1 | grep -oP \"${regex}\" | tr -d ' ' | sort -rV") + else + find_version_list "$prefix" "$suffix" "$install_type" "false" version_list "${requested_version}" if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ]; then requested_version="$(echo "${version_list}" | head -n 1)" else set +e - requested_version="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")" + requested_version="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|-|$)")" set -e fi if [ -z "${requested_version}" ] || ! 
echo "${version_list}" | grep "^${requested_version//./\\.}$" > /dev/null 2>&1; then @@ -140,8 +284,19 @@ if [ "${architecture}" != "amd64" ] && [ "${architecture}" != "x86_64" ] && [ "$ exit 1 fi -# Install dependencies -check_packages curl ca-certificates zip unzip sed +# Install dependencies, +check_packages ca-certificates zip unzip sed findutils util-linux tar +# Make sure passwd (Debian) and shadow-utils RHEL family is installed +if [ ${ADJUSTED_ID} = "debian" ]; then + check_packages passwd +elif [ ${ADJUSTED_ID} = "rhel" ]; then + check_packages shadow-utils +fi +# minimal RHEL installs may not include curl, or includes curl-minimal instead. +# Install curl if the "curl" command is not present. +if ! type curl > /dev/null 2>&1; then + check_packages curl +fi # Install sdkman if not installed if [ ! -d "${SDKMAN_DIR}" ]; then @@ -159,8 +314,7 @@ if [ ! -d "${SDKMAN_DIR}" ]; then updaterc "export SDKMAN_DIR=${SDKMAN_DIR}\n. \${SDKMAN_DIR}/bin/sdkman-init.sh" fi -get_jdk_distro ${JAVA_VERSION} -sdk_install java ${JAVA_VERSION} "\\s*" "(\\.[a-z0-9]+)*-${JDK_DISTRO}\\s*" ".*-[a-z]+$" "true" +sdk_install java ${JAVA_VERSION} "\\s*" "(\\.[a-z0-9]+)*-" ".*-[a-z]+$" "true" # Additional java versions to be installed but not be set as default. if [ ! -z "${ADDITIONAL_VERSIONS}" ]; then @@ -168,29 +322,33 @@ if [ ! -z "${ADDITIONAL_VERSIONS}" ]; then IFS="," read -a additional_versions <<< "$ADDITIONAL_VERSIONS" for version in "${additional_versions[@]}"; do - get_jdk_distro ${version} - sdk_install java ${version} "\\s*" "(\\.[a-z0-9]+)*-${JDK_DISTRO}\\s*" ".*-[a-z]+$" "false" + sdk_install java ${version} "\\s*" "(\\.[a-z0-9]+)*-" ".*-[a-z]+$" "false" done IFS=$OLDIFS su ${USERNAME} -c ". ${SDKMAN_DIR}/bin/sdkman-init.sh && sdk default java ${JAVA_VERSION}" fi # Install Ant -if [[ "${INSTALL_ANT}" = "true" ]] && ! ant -version > /dev/null; then +if [[ "${INSTALL_ANT}" = "true" ]] && ! 
ant -version > /dev/null 2>&1; then sdk_install ant ${ANT_VERSION} fi # Install Gradle -if [[ "${INSTALL_GRADLE}" = "true" ]] && ! gradle --version > /dev/null; then +if [[ "${INSTALL_GRADLE}" = "true" ]] && ! gradle --version > /dev/null 2>&1; then sdk_install gradle ${GRADLE_VERSION} fi # Install Maven -if [[ "${INSTALL_MAVEN}" = "true" ]] && ! mvn --version > /dev/null; then +if [[ "${INSTALL_MAVEN}" = "true" ]] && ! mvn --version > /dev/null 2>&1; then sdk_install maven ${MAVEN_VERSION} fi +# Install Groovy +if [[ "${INSTALL_GROOVY}" = "true" ]] && ! groovy --version > /dev/null 2>&1; then + sdk_install groovy "${GROOVY_VERSION}" +fi + # Clean up -rm -rf /var/lib/apt/lists/* +clean_up echo "Done!" diff --git a/src/kubectl-helm-minikube/devcontainer-feature.json b/src/kubectl-helm-minikube/devcontainer-feature.json index 9229a96e6..410a909e4 100644 --- a/src/kubectl-helm-minikube/devcontainer-feature.json +++ b/src/kubectl-helm-minikube/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "kubectl-helm-minikube", - "version": "1.1.4", + "version": "1.2.2", "name": "Kubectl, Helm, and Minikube", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/kubectl-helm-minikube", "description": "Installs latest version of kubectl, Helm, and optionally minikube. Auto-detects latest versions and installs needed dependencies.", @@ -44,6 +44,17 @@ "type": "volume" } ], + "customizations": { + "vscode": { + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes kubectl, Helm, optionally minikube, and needed dependencies pre-installed and available on the `PATH`. When configuring Ingress for your Kubernetes cluster, note that by default Kubernetes will bind to a specific interface's IP rather than localhost or all interfaces. This is why you need to use the Kubernetes Node's IP when connecting - even if there's only one Node as in the case of Minikube." 
+ } + ] + } + } + }, "installsAfter": [ "ghcr.io/devcontainers/features/common-utils" ] diff --git a/src/kubectl-helm-minikube/install.sh b/src/kubectl-helm-minikube/install.sh index d2d11364e..f0cf1c946 100755 --- a/src/kubectl-helm-minikube/install.sh +++ b/src/kubectl-helm-minikube/install.sh @@ -22,9 +22,6 @@ MINIKUBE_SHA256="${MINIKUBE_SHA256:-"automatic"}" USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}" HELM_GPG_KEYS_URI="https://raw.githubusercontent.com/helm/helm/main/KEYS" -GPG_KEY_SERVERS="keyserver hkp://keyserver.ubuntu.com -keyserver hkps://keys.openpgp.org -keyserver hkp://keyserver.pgp.com" if [ "$(id -u)" -ne 0 ]; then echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' @@ -57,11 +54,12 @@ fi find_version_from_git_tags() { local variable_name=$1 local requested_version=${!variable_name} + requested_version="${requested_version#v}" if [ "${requested_version}" = "none" ]; then return; fi local repository=$2 local prefix=${3:-"tags/v"} local separator=${4:-"."} - local last_part_optional=${5:-"false"} + local last_part_optional=${5:-"false"} if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then local escaped_separator=${separator//./\\.} local last_part @@ -87,6 +85,47 @@ find_version_from_git_tags() { echo "${variable_name}=${!variable_name}" } +# Use semver logic to decrement a version number then look for the closest match +find_prev_version_from_git_tags() { + local variable_name=$1 + local current_version=${!variable_name} + local repository=$2 + # Normally a "v" is used before the version number, but support alternate cases + local prefix=${3:-"tags/v"} + # Some repositories use "_" instead of "." for version number part separation, support that + local separator=${4:-"."} + # Some tools release versions that omit the last digit (e.g. go) + local last_part_optional=${5:-"false"} + # Some repositories may have tags that include a suffix (e.g. 
actions/node-versions) + local version_suffix_regex=$6 + # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios. + set +e + major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')" + minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')" + breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')" + + if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then + ((major=major-1)) + declare -g ${variable_name}="${major}" + # Look for latest version from previous major release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + # Handle situations like Go's odd version pattern where "0" releases omit the last part + elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then + ((minor=minor-1)) + declare -g ${variable_name}="${major}.${minor}" + # Look for latest version from previous minor release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + else + ((breakfix=breakfix-1)) + if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then + declare -g ${variable_name}="${major}.${minor}" + else + declare -g ${variable_name}="${major}.${minor}.${breakfix}" + fi + fi + set -e +} + apt_get_update() { if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then @@ -147,30 +186,105 @@ if [ ${KUBECTL_VERSION} != "none" ]; then kubectl completion bash > /etc/bash_completion.d/kubectl # kubectl zsh completion - if [ -e "${USERHOME}}/.oh-my-zsh" ]; then + if [ -e "${USERHOME}/.oh-my-zsh" ]; then mkdir -p "${USERHOME}/.oh-my-zsh/completions" kubectl completion zsh > "${USERHOME}/.oh-my-zsh/completions/_kubectl" chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh" fi fi +# Function to fetch the version released prior to the latest version +get_previous_version() { + local url=$1 + local 
repo_url=$2 + local variable_name=$3 + prev_version=${!variable_name#v} + + output=$(curl -s "$repo_url"); + + check_packages jq + + message=$(echo "$output" | jq -r '.message') + if [[ $message == "API rate limit exceeded"* ]]; then + echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}" + echo -e "\nAttempting to find latest version using GitHub tags." + find_prev_version_from_git_tags prev_version "$url" "tags/v" + declare -g ${variable_name}="v${prev_version}" + else + echo -e "\nAttempting to find latest version using GitHub Api." + version=$(echo "$output" | jq -r '.tag_name') + declare -g ${variable_name}="${version}" + fi + echo "${variable_name}=${!variable_name}" +} + +get_github_api_repo_url() { + local url=$1 + echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases/latest" +} + +get_helm() { + HELM_VERSION=$1 + helm_filename="helm-${HELM_VERSION}-linux-${architecture}.tar.gz" + tmp_helm_filename="/tmp/helm/${helm_filename}" + curl -sSL "https://get.helm.sh/${helm_filename}" -o "${tmp_helm_filename}" + curl -sSL "https://github.com/helm/helm/releases/download/${HELM_VERSION}/${helm_filename}.asc" -o "${tmp_helm_filename}.asc" +} + +# Get the list of GPG key servers that are reachable +get_gpg_key_servers() { + declare -A keyservers_curl_map=( + ["hkp://keyserver.ubuntu.com"]="http://keyserver.ubuntu.com:11371" + ["hkp://keyserver.ubuntu.com:80"]="http://keyserver.ubuntu.com" + ["hkps://keys.openpgp.org"]="https://keys.openpgp.org" + ["hkp://keyserver.pgp.com"]="http://keyserver.pgp.com:11371" + ) + + local curl_args="" + local keyserver_reachable=false # Flag to indicate if any keyserver is reachable + + if [ ! 
-z "${KEYSERVER_PROXY}" ]; then + curl_args="--proxy ${KEYSERVER_PROXY}" + fi + + for keyserver in "${!keyservers_curl_map[@]}"; do + local keyserver_curl_url="${keyservers_curl_map[${keyserver}]}" + if curl -s ${curl_args} --max-time 5 ${keyserver_curl_url} > /dev/null; then + echo "keyserver ${keyserver}" + keyserver_reachable=true + else + echo "(*) Keyserver ${keyserver} is not reachable." >&2 + fi + done + + if ! $keyserver_reachable; then + echo "(!) No keyserver is reachable." >&2 + exit 1 + fi +} + if [ ${HELM_VERSION} != "none" ]; then # Install Helm, verify signature and checksum echo "Downloading Helm..." - find_version_from_git_tags HELM_VERSION "https://github.com/helm/helm" + helm_url="https://github.com/helm/helm" + find_version_from_git_tags HELM_VERSION "${helm_url}" if [ "${HELM_VERSION::1}" != 'v' ]; then HELM_VERSION="v${HELM_VERSION}" fi mkdir -p /tmp/helm - helm_filename="helm-${HELM_VERSION}-linux-${architecture}.tar.gz" - tmp_helm_filename="/tmp/helm/${helm_filename}" - curl -sSL "https://get.helm.sh/${helm_filename}" -o "${tmp_helm_filename}" - curl -sSL "https://github.com/helm/helm/releases/download/${HELM_VERSION}/${helm_filename}.asc" -o "${tmp_helm_filename}.asc" + get_helm "${HELM_VERSION}" + if grep -q "BlobNotFound" "${tmp_helm_filename}"; then + echo -e "\n(!) Failed to fetch the latest artifacts for helm ${HELM_VERSION}..." + repo_url=$(get_github_api_repo_url "${helm_url}") + get_previous_version "${helm_url}" "${repo_url}" HELM_VERSION + echo -e "\nAttempting to install ${HELM_VERSION}" + get_helm "${HELM_VERSION}" + fi export GNUPGHOME="/tmp/helm/gnupg" mkdir -p "${GNUPGHOME}" chmod 700 ${GNUPGHOME} curl -sSL "${HELM_GPG_KEYS_URI}" -o /tmp/helm/KEYS - echo -e "disable-ipv6\n${GPG_KEY_SERVERS}" > ${GNUPGHOME}/dirmngr.conf + echo -e "disable-ipv6\n$(get_gpg_key_servers)" > ${GNUPGHOME}/dirmngr.conf gpg -q --import "/tmp/helm/KEYS" if ! 
gpg --verify "${tmp_helm_filename}.asc" > ${GNUPGHOME}/verify.log 2>&1; then echo "Verification failed!" @@ -198,6 +312,16 @@ if [ ${HELM_VERSION} != "none" ]; then echo '(!) Helm installation failed!' exit 1 fi + + # helm bash completion + helm completion bash > /etc/bash_completion.d/helm + + # helm zsh completion + if [ -e "${USERHOME}/.oh-my-zsh" ]; then + mkdir -p "${USERHOME}/.oh-my-zsh/completions" + helm completion zsh > "${USERHOME}/.oh-my-zsh/completions/_helm" + chown -R "${USERNAME}" "${USERHOME}/.oh-my-zsh" + fi fi # Install Minikube, verify checksum diff --git a/src/nix/README.md b/src/nix/README.md index 3ab9b0017..4fd9700f3 100644 --- a/src/nix/README.md +++ b/src/nix/README.md @@ -18,6 +18,7 @@ Installs the Nix package manager and optionally a set of packages. | version | Version of Nix to install. | string | latest | | multiUser | Perform a multi-user install (instead of single user) | boolean | true | | packages | Optional comma separated list of Nix packages to install in profile. | string | - | +| useAttributePath | Enable this option to use exact attribute path of the package in the Nixpkgs repository, aligning with the nix-env -iA command. | boolean | false | | flakeUri | Optional URI to a Nix Flake to install in profile. | string | - | | extraNixConfig | Optional comma separated list of extra lines to add to /etc/nix/nix.conf. 
| string | - | diff --git a/src/nix/devcontainer-feature.json b/src/nix/devcontainer-feature.json index 96a75016b..63723e591 100644 --- a/src/nix/devcontainer-feature.json +++ b/src/nix/devcontainer-feature.json @@ -1,13 +1,16 @@ { "id": "nix", - "version": "1.1.3", + "version": "1.3.1", "name": "Nix Package Manager", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/nix", - "description": "Installs the Nix package manager and optionally a set of packages.", + "description": "Installs the Nix package manager and optionally a set of packages.", "options": { "version": { "type": "string", - "proposals": ["latest", "2.11"], + "proposals": [ + "latest", + "2.11" + ], "default": "latest", "description": "Version of Nix to install." }, @@ -21,6 +24,11 @@ "default": "", "description": "Optional comma separated list of Nix packages to install in profile." }, + "useAttributePath": { + "type": "boolean", + "default": false, + "description": "Enable this option to use exact attribute path of the package in the Nixpkgs repository, aligning with the nix-env -iA command." + }, "flakeUri": { "type": "string", "default": "", @@ -32,11 +40,29 @@ "description": "Optional comma separated list of extra lines to add to /etc/nix/nix.conf." } }, + "customizations": { + "vscode": { + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes the Nix package manager pre-installed and available on the `PATH`. Currently `flakeUri` works best with a remote URI (e.g., `github:nixos/nixpkgs/nixpkgs-unstable#hello`) as local files need to be in the image. The dev container supports two installation models for Nix: multi-user and single user. Multi-user is the default." 
+ } + ] + } + } + }, "installsAfter": [ "ghcr.io/devcontainers/features/common-utils" ], "containerEnv": { "PATH": "/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:${PATH}" }, + "mounts": [ + { + "source": "nix-store-${devcontainerId}", + "target": "/nix", + "type": "volume" + } + ], "entrypoint": "/usr/local/share/nix-entrypoint.sh" -} \ No newline at end of file +} diff --git a/src/nix/install.sh b/src/nix/install.sh index ed048fe8d..0030c2b18 100755 --- a/src/nix/install.sh +++ b/src/nix/install.sh @@ -8,6 +8,7 @@ cd "${FEATURE_DIR}" VERSION="${VERSION:-"latest"}" MULTIUSER="${MULTIUSER:-"true"}" PACKAGES="${PACKAGES//,/ }" +USEATTRIBUTEPATH="${USEATTRIBUTEPATH:-"false"}" FLAKEURI="${FLAKEURI:-""}" EXTRANIXCONFIG="${EXTRANIXCONFIG:-""}" USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}" @@ -22,71 +23,67 @@ fi detect_user USERNAME -if [ -e "/nix" ]; then - echo "(!) Nix is already installed! Skipping installation." -else - if [ "${USERNAME}" = "root" ] && [ "${MULTIUSER}" != "true" ]; then - echo "(!) A single user install is not allowed for root. Add a non-root user to your image or set multiUser to true in your feature configuration." - exit 1 - fi +if [ "${USERNAME}" = "root" ] && [ "${MULTIUSER}" != "true" ]; then + echo "(!) A single user install is not allowed for root. Add a non-root user to your image or set multiUser to true in your feature configuration." 
+ exit 1 +fi - # Verify dependencies - apt_get_update_if_exists - check_command curl "curl ca-certificates" "curl ca-certificates" "curl ca-certificates" - check_command gpg2 gnupg2 gnupg gnupg2 - check_command dirmngr dirmngr dirmngr dirmngr - check_command xz xz-utils xz xz - check_command git git git git - check_command xargs findutils findutils findutils +# Verify dependencies +apt_get_update_if_exists +check_command curl "curl ca-certificates" "curl ca-certificates" "curl ca-certificates" +check_command gpg2 gnupg2 gnupg gnupg2 +check_command dirmngr dirmngr dirmngr dirmngr +check_command xz xz-utils xz xz +check_command git git git git +check_command xargs findutils findutils findutils - # Determine version - find_version_from_git_tags VERSION https://github.com/NixOS/nix "tags/" +# Determine version +find_version_from_git_tags VERSION https://github.com/NixOS/nix "tags/" - # Download and verify install per https://nixos.org/download.html#nix-verify-installation - tmpdir="$(mktemp -d)" - echo "(*) Downloading Nix installer..." - set +e +# Download and verify install per https://nixos.org/download.html#nix-verify-installation +tmpdir="$(mktemp -d)" +echo "(*) Downloading Nix installer..." +set +e +curl -sSLf -o "${tmpdir}/install-nix" https://releases.nixos.org/nix/nix-${VERSION}/install +exit_code=$? +set -e +if [ "$exit_code" != "0" ]; then + # Handle situation where git tags are ahead of what was is available to actually download + echo "(!) Nix version ${VERSION} failed to download. Attempting to fall back one version to retry..." + find_prev_version_from_git_tags VERSION https://github.com/NixOS/nix "tags/" curl -sSLf -o "${tmpdir}/install-nix" https://releases.nixos.org/nix/nix-${VERSION}/install - exit_code=$? - set -e - if [ "$exit_code" != "0" ]; then - # Handle situation where git tags are ahead of what was is available to actually download - echo "(!) Nix version ${VERSION} failed to download. Attempting to fall back one version to retry..." 
- find_prev_version_from_git_tags VERSION https://github.com/NixOS/nix "tags/" - curl -sSLf -o "${tmpdir}/install-nix" https://releases.nixos.org/nix/nix-${VERSION}/install - fi - cd "${FEATURE_DIR}" +fi +cd "${FEATURE_DIR}" - # Do a multi or single-user setup based on feature config - if [ "${MULTIUSER}" = "true" ]; then - echo "(*) Performing multi-user install..." - sh "${tmpdir}/install-nix" --daemon - else - home_dir="$(eval echo ~${USERNAME})" - if [ ! -e "${home_dir}" ]; then - echo "(!) Home directory ${home_dir} does not exist for ${USERNAME}. Nix install will fail." - exit 1 - fi - echo "(*) Performing single-user install..." - echo -e "\n**NOTE: Nix will only work for user ${USERNAME} on Linux if the host machine user's UID is $(id -u ${USERNAME}). You will need to chown /nix otherwise.**\n" - # Install per https://nixos.org/manual/nix/stable/installation/installing-binary.html#single-user-installation - mkdir -p /nix - chown ${USERNAME} /nix ${tmpdir} - su ${USERNAME} -c "sh \"${tmpdir}/install-nix\" --no-daemon --no-modify-profile" - # nix installer does not update ~/.bashrc, and USER may or may not be defined, so update rc/profile files directly to handle that - snippet=' - if [ "${PATH#*$HOME/.nix-profile/bin}" = "${PATH}" ]; then if [ -z "$USER" ]; then USER=$(whoami); fi; . $HOME/.nix-profile/etc/profile.d/nix.sh; fi - ' - update_rc_file "$home_dir/.bashrc" "${snippet}" - update_rc_file "$home_dir/.zshenv" "${snippet}" - update_rc_file "$home_dir/.profile" "${snippet}" +# Do a multi or single-user setup based on feature config +if [ "${MULTIUSER}" = "true" ]; then + echo "(*) Performing multi-user install..." + sh "${tmpdir}/install-nix" --daemon +else + home_dir="$(eval echo ~${USERNAME})" + if [ ! -e "${home_dir}" ]; then + echo "(!) Home directory ${home_dir} does not exist for ${USERNAME}. Nix install will fail." + exit 1 fi - rm -rf "${tmpdir}" "/tmp/tmp-gnupg" + echo "(*) Performing single-user install..." 
+ echo -e "\n**NOTE: Nix will only work for user ${USERNAME} on Linux if the host machine user's UID is $(id -u ${USERNAME}). You will need to chown /nix otherwise.**\n" + # Install per https://nixos.org/manual/nix/stable/installation/installing-binary.html#single-user-installation + mkdir -p /nix + chown ${USERNAME} /nix ${tmpdir} + su ${USERNAME} -c "sh \"${tmpdir}/install-nix\" --no-daemon --no-modify-profile" + # nix installer does not update ~/.bashrc, and USER may or may not be defined, so update rc/profile files directly to handle that + snippet=' + if [ "${PATH#*$HOME/.nix-profile/bin}" = "${PATH}" ]; then if [ -z "$USER" ]; then USER=$(whoami); fi; . $HOME/.nix-profile/etc/profile.d/nix.sh; fi + ' + update_rc_file "$home_dir/.bashrc" "${snippet}" + update_rc_file "$home_dir/.zshenv" "${snippet}" + update_rc_file "$home_dir/.profile" "${snippet}" fi +rm -rf "${tmpdir}" "/tmp/tmp-gnupg" # Set nix config mkdir -p /etc/nix -create_or_update_file /etc/nix/nix.conf 'sandbox = false' +create_or_update_file /etc/nix/nix.conf 'sandbox = false' if [ ! -z "${FLAKEURI}" ] && [ "${FLAKEURI}" != "none" ]; then create_or_update_file /etc/nix/nix.conf 'experimental-features = nix-command flakes' fi @@ -127,4 +124,4 @@ else " fi -echo "Done!" \ No newline at end of file +echo "Done!" diff --git a/src/nix/post-install-steps.sh b/src/nix/post-install-steps.sh index aa466798b..68f93a391 100755 --- a/src/nix/post-install-steps.sh +++ b/src/nix/post-install-steps.sh @@ -2,10 +2,29 @@ set -e echo "(*) Executing post-installation steps..." +# if not starts with "nixpkgs." add it as prefix to package name +add_nixpkgs_prefix() { + local packages=$1 + local -a addr + IFS=' ' read -ra addr <<<"$packages" + for i in "${!addr[@]}"; do + if [[ ${addr[i]} != nixpkgs.* ]]; then + addr[i]="nixpkgs.${addr[i]}" + fi + done + IFS=' ' echo "${addr[*]}" +} + # Install list of packages in profile if specified. if [ ! 
-z "${PACKAGES}" ] && [ "${PACKAGES}" != "none" ]; then + if [ "${USEATTRIBUTEPATH}" = "true" ]; then + PACKAGES=$(add_nixpkgs_prefix "$PACKAGES") + echo "Installing packages \"${PACKAGES}\" in profile..." + nix-env -iA ${PACKAGES} + else echo "Installing packages \"${PACKAGES}\" in profile..." nix-env --install ${PACKAGES} + fi fi # Install Nix flake in profile if specified diff --git a/src/node/NOTES.md b/src/node/NOTES.md index 65eb93bf3..506fa1b4e 100644 --- a/src/node/NOTES.md +++ b/src/node/NOTES.md @@ -20,6 +20,8 @@ Alternatively, you can start up an interactive shell which will in turn source ` ## OS Support -This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. +Debian/Ubuntu, RedHat Enterprise Linux, Fedora, Alma, and Rocky Linux distributions with the `apt`, `yum`, `dnf`, or `microdnf` package manager installed. + +**Note**: RedHat 7 Family (RedHat, CentOS, etc.) must use Node versions less than 18 due to its system libraries and long-term support (LTS) policies. `bash` is required to execute the `install.sh` script. diff --git a/src/node/README.md b/src/node/README.md index b98391922..328af529f 100644 --- a/src/node/README.md +++ b/src/node/README.md @@ -18,7 +18,9 @@ Installs Node.js, nvm, yarn, pnpm, and needed dependencies. | version | Select or enter a Node.js version to install | string | lts | | nodeGypDependencies | Install dependencies to compile native node modules (node-gyp)? | boolean | true | | nvmInstallPath | The path where NVM will be installed. | string | /usr/local/share/nvm | +| pnpmVersion | Select or enter the PNPM version to install | string | latest | | nvmVersion | Version of NVM to install. | string | latest | +| installYarnUsingApt | On Debian and Ubuntu systems, you have the option to install Yarn globally via APT. If you choose not to use this option, Yarn will be set up using Corepack instead. 
This choice is specific to Debian and Ubuntu; for other Linux distributions, Yarn is always installed using Corepack, with a fallback to installation via NPM if an error occurs. | boolean | true | ## Customizations @@ -48,7 +50,9 @@ Alternatively, you can start up an interactive shell which will in turn source ` ## OS Support -This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. +Debian/Ubuntu, RedHat Enterprise Linux, Fedora, Alma, and Rocky Linux distributions with the `apt`, `yum`, `dnf`, or `microdnf` package manager installed. + +**Note**: RedHat 7 Family (RedHat, CentOS, etc.) must use Node versions less than 18 due to its system libraries and long-term support (LTS) policies. `bash` is required to execute the `install.sh` script. diff --git a/src/node/devcontainer-feature.json b/src/node/devcontainer-feature.json index aee55a1f6..1828ba0d5 100644 --- a/src/node/devcontainer-feature.json +++ b/src/node/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "node", - "version": "1.3.0", + "version": "1.6.3", "name": "Node.js (via nvm), yarn and pnpm", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/node", "description": "Installs Node.js, nvm, yarn, pnpm, and needed dependencies.", @@ -11,9 +11,8 @@ "lts", "latest", "none", - "18", - "16", - "14" + "22", + "20" ], "default": "lts", "description": "Select or enter a Node.js version to install" @@ -28,6 +27,20 @@ "default": "/usr/local/share/nvm", "description": "The path where NVM will be installed." }, + "pnpmVersion": { + "type": "string", + "proposals": [ + "latest", + "8.8.0", + "8.0.0", + "7.30.0", + "6.14.8", + "5.18.10", + "none" + ], + "default": "latest", + "description": "Select or enter the PNPM version to install" + }, "nvmVersion": { "type": "string", "proposals": [ @@ -36,13 +49,25 @@ ], "default": "latest", "description": "Version of NVM to install." 
+ }, + "installYarnUsingApt": { + "type": "boolean", + "default": true, + "description": "On Debian and Ubuntu systems, you have the option to install Yarn globally via APT. If you choose not to use this option, Yarn will be set up using Corepack instead. This choice is specific to Debian and Ubuntu; for other Linux distributions, Yarn is always installed using Corepack, with a fallback to installation via NPM if an error occurs." } }, "customizations": { "vscode": { "extensions": [ "dbaeumer.vscode-eslint" - ] + ], + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes `node`, `npm` and `eslint` pre-installed and available on the `PATH` for Node.js and JavaScript development." + } + ] + } } }, "containerEnv": { @@ -53,4 +78,4 @@ "installsAfter": [ "ghcr.io/devcontainers/features/common-utils" ] -} +} \ No newline at end of file diff --git a/src/node/install.sh b/src/node/install.sh index 809109413..46d448440 100755 --- a/src/node/install.sh +++ b/src/node/install.sh @@ -8,9 +8,11 @@ # Maintainer: The Dev Container spec maintainers export NODE_VERSION="${VERSION:-"lts"}" +export PNPM_VERSION="${PNPMVERSION:-"latest"}" export NVM_VERSION="${NVMVERSION:-"latest"}" export NVM_DIR="${NVMINSTALLPATH:-"/usr/local/share/nvm"}" INSTALL_TOOLS_FOR_NODE_GYP="${NODEGYPDEPENDENCIES:-true}" +export INSTALL_YARN_USING_APT="${INSTALLYARNUSINGAPT:-true}" # only concerns Debian-based systems # Comma-separated list of node versions to be installed (with nvm) # alongside NODE_VERSION, but not set as default. @@ -21,61 +23,144 @@ UPDATE_RC="${UPDATE_RC:-"true"}" set -e -# Clean up -rm -rf /var/lib/apt/lists/* - if [ "$(id -u)" -ne 0 ]; then echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' exit 1 fi +# Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME +. 
/etc/os-release +# Get an adjusted ID independent of distro variants +MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . -f 1) +if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then + ADJUSTED_ID="debian" +elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then + ADJUSTED_ID="rhel" + if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then + VERSION_CODENAME="rhel${MAJOR_VERSION_ID}" + else + VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}" + fi +else + echo "Linux distro ${ID} not supported." + exit 1 +fi + +if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then + # As of 1 July 2024, mirrorlist.centos.org no longer exists. + # Update the repo files to reference vault.centos.org. + sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo + sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo + sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo +fi + +# Setup INSTALL_CMD & PKG_MGR_CMD +if type apt-get > /dev/null 2>&1; then + PKG_MGR_CMD=apt-get + INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends" +elif type microdnf > /dev/null 2>&1; then + PKG_MGR_CMD=microdnf + INSTALL_CMD="${PKG_MGR_CMD} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0" +elif type dnf > /dev/null 2>&1; then + PKG_MGR_CMD=dnf + INSTALL_CMD="${PKG_MGR_CMD} -y install" +else + PKG_MGR_CMD=yum + INSTALL_CMD="${PKG_MGR_CMD} -y install" +fi + +# Clean up +clean_up() { + case ${ADJUSTED_ID} in + debian) + rm -rf /var/lib/apt/lists/* + ;; + rhel) + rm -rf /var/cache/dnf/* /var/cache/yum/* + rm -f /etc/yum.repos.d/yarn.repo + ;; + esac +} +clean_up + # Ensure that login shells get the correct path if the user updated the PATH using ENV. 
rm -f /etc/profile.d/00-restore-env.sh echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh chmod +x /etc/profile.d/00-restore-env.sh -# Determine the appropriate non-root user -if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then - USERNAME="" - POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)") - for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do - if id -u ${CURRENT_USER} > /dev/null 2>&1; then - USERNAME=${CURRENT_USER} - break - fi - done - if [ "${USERNAME}" = "" ]; then - USERNAME=root - fi -elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then - USERNAME=root -fi - updaterc() { + local _bashrc + local _zshrc if [ "${UPDATE_RC}" = "true" ]; then - echo "Updating /etc/bash.bashrc and /etc/zsh/zshrc..." - if [[ "$(cat /etc/bash.bashrc)" != *"$1"* ]]; then - echo -e "$1" >> /etc/bash.bashrc + case $ADJUSTED_ID in + debian) + _bashrc=/etc/bash.bashrc + _zshrc=/etc/zsh/zshrc + ;; + rhel) + _bashrc=/etc/bashrc + _zshrc=/etc/zshrc + ;; + esac + echo "Updating ${_bashrc} and ${_zshrc}..." + if [[ "$(cat ${_bashrc})" != *"$1"* ]]; then + echo -e "$1" >> "${_bashrc}" fi - if [ -f "/etc/zsh/zshrc" ] && [[ "$(cat /etc/zsh/zshrc)" != *"$1"* ]]; then - echo -e "$1" >> /etc/zsh/zshrc + if [ -f "${_zshrc}" ] && [[ "$(cat ${_zshrc})" != *"$1"* ]]; then + echo -e "$1" >> "${_zshrc}" fi fi } -apt_get_update() { - if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then - echo "Running apt-get update..." - apt-get update -y - fi +pkg_mgr_update() { + case $ADJUSTED_ID in + debian) + if [ "$(find /var/lib/apt/lists/* 2>/dev/null | wc -l)" = "0" ]; then + echo "Running apt-get update..." + ${PKG_MGR_CMD} update -y + fi + ;; + rhel) + if [ ${PKG_MGR_CMD} = "microdnf" ]; then + if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then + echo "Running ${PKG_MGR_CMD} makecache ..." 
+ ${PKG_MGR_CMD} makecache + fi + else + if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then + echo "Running ${PKG_MGR_CMD} check-update ..." + set +e + stderr_messages=$(${PKG_MGR_CMD} -q check-update 2>&1) + rc=$? + # centos 7 sometimes returns a status of 100 when it apears to work. + if [ $rc != 0 ] && [ $rc != 100 ]; then + echo "(Error) ${PKG_MGR_CMD} check-update produced the following error message(s):" + echo "${stderr_messages}" + exit 1 + fi + set -e + fi + fi + ;; + esac } # Checks if packages are installed and installs them if not check_packages() { - if ! dpkg -s "$@" > /dev/null 2>&1; then - apt_get_update - apt-get -y install --no-install-recommends "$@" - fi + case ${ADJUSTED_ID} in + debian) + if ! dpkg -s "$@" > /dev/null 2>&1; then + pkg_mgr_update + ${INSTALL_CMD} "$@" + fi + ;; + rhel) + if ! rpm -q "$@" > /dev/null 2>&1; then + pkg_mgr_update + ${INSTALL_CMD} "$@" + fi + ;; + esac } # Figure out correct version of a three part version number is not passed @@ -112,35 +197,96 @@ find_version_from_git_tags() { echo "${variable_name}=${!variable_name}" } +install_yarn() { + if [ "${ADJUSTED_ID}" = "debian" ] && [ "${INSTALL_YARN_USING_APT}" = "true" ]; then + # for backward compatiblity with existing devcontainer features, install yarn + # via apt-get on Debian systems + if ! type yarn >/dev/null 2>&1; then + # Import key safely (new method rather than deprecated apt-key approach) and install + curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | gpg --dearmor > /usr/share/keyrings/yarn-archive-keyring.gpg + echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/yarn-archive-keyring.gpg] https://dl.yarnpkg.com/debian/ stable main" > /etc/apt/sources.list.d/yarn.list + apt-get update + apt-get -y install --no-install-recommends yarn + else + echo "Yarn is already installed." 
+ fi + else + local _ver=${1:-node} + # on non-debian systems or if user opted not to use APT, prefer corepack + # Fallback to npm based installation of yarn. + # But try to leverage corepack if possible + # From https://yarnpkg.com: + # The preferred way to manage Yarn is by-project and through Corepack, a tool + # shipped by default with Node.js. Modern releases of Yarn aren't meant to be + # installed globally, or from npm. + if ! bash -c ". '${NVM_DIR}/nvm.sh' && nvm use ${_ver} && type yarn >/dev/null 2>&1"; then + if bash -c ". '${NVM_DIR}/nvm.sh' && nvm use ${_ver} && type corepack >/dev/null 2>&1"; then + su ${USERNAME} -c "umask 0002 && . '${NVM_DIR}/nvm.sh' && nvm use ${_ver} && corepack enable" + fi + if ! bash -c ". '${NVM_DIR}/nvm.sh' && nvm use ${_ver} && type yarn >/dev/null 2>&1"; then + # Yum/DNF want to install nodejs dependencies, we'll use NPM to install yarn + su ${USERNAME} -c "umask 0002 && . '${NVM_DIR}/nvm.sh' && nvm use ${_ver} && npm install --global yarn" + fi + else + echo "Yarn already installed." + fi + fi +} + +# Mariner does not have awk installed by default, this can cause +# problems is username is auto* and later when we try to install +# node via npm. +if ! type awk >/dev/null 2>&1; then + check_packages awk +fi + +# Determine the appropriate non-root user +if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then + USERNAME="" + POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)") + for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do + if id -u ${CURRENT_USER} > /dev/null 2>&1; then + USERNAME=${CURRENT_USER} + break + fi + done + if [ "${USERNAME}" = "" ]; then + USERNAME=root + fi +elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then + USERNAME=root +fi + # Ensure apt is in non-interactive to avoid prompts export DEBIAN_FRONTEND=noninteractive -. 
/etc/os-release -if [[ "bionic" = *"${VERSION_CODENAME}"* ]]; then - if [[ "${NODE_VERSION}" =~ "18" ]] || [[ "${NODE_VERSION}" = "lts" ]]; then - echo "(!) Unsupported distribution version '${VERSION_CODENAME}' for Node 18. Details: https://github.com/nodejs/node/issues/42351#issuecomment-1068424442" +if ( [ -n "${VERSION_CODENAME}" ] && [[ "bionic" = *"${VERSION_CODENAME}"* ]] ) || [[ "rhel7" = *"${ADJUSTED_ID}${MAJOR_VERSION_ID}"* ]]; then + node_major_version=$(echo "${NODE_VERSION}" | cut -d . -f 1) + if [[ "${node_major_version}" -ge 18 ]] || [[ "${NODE_VERSION}" = "lts" ]] || [[ "${NODE_VERSION}" = "latest" ]]; then + echo "(!) Unsupported distribution version '${VERSION_CODENAME}' for Node >= 18. Details: https://github.com/nodejs/node/issues/42351#issuecomment-1068424442" exit 1 fi fi # Install dependencies -check_packages apt-transport-https curl ca-certificates tar gnupg2 dirmngr +case ${ADJUSTED_ID} in + debian) + check_packages apt-transport-https curl ca-certificates tar gnupg2 dirmngr + ;; + rhel) + check_packages ca-certificates tar gnupg2 which findutils util-linux tar + # minimal RHEL installs may not include curl, or includes curl-minimal instead. + # Install curl if the "curl" command is not present. + if ! type curl > /dev/null 2>&1; then + check_packages curl + fi + ;; +esac if ! type git > /dev/null 2>&1; then check_packages git fi -# Install yarn -if type yarn > /dev/null 2>&1; then - echo "Yarn already installed." 
-else - # Import key safely (new method rather than deprecated apt-key approach) and install - curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | gpg --dearmor > /usr/share/keyrings/yarn-archive-keyring.gpg - echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/yarn-archive-keyring.gpg] https://dl.yarnpkg.com/debian/ stable main" > /etc/apt/sources.list.d/yarn.list - apt-get update - apt-get -y install --no-install-recommends yarn -fi - # Adjust node version if required if [ "${NODE_VERSION}" = "none" ]; then export NODE_VERSION= @@ -158,8 +304,12 @@ set -e umask 0002 # Do not update profile - we'll do this manually export PROFILE=/dev/null -curl -so- "https://raw.githubusercontent.com/nvm-sh/nvm/v${NVM_VERSION}/install.sh" | bash -source "${NVM_DIR}/nvm.sh" +curl -so- "https://raw.githubusercontent.com/nvm-sh/nvm/v${NVM_VERSION}/install.sh" | bash || { + PREV_NVM_VERSION=$(curl -s https://api.github.com/repos/nvm-sh/nvm/releases/latest | grep '"tag_name"' | sed -E 's/.*"([^"]+)".*/\1/') + curl -so- "https://raw.githubusercontent.com/nvm-sh/nvm/\${PREV_NVM_VERSION}/install.sh" | bash + NVM_VERSION="\${PREV_NVM_VERSION}" +} +[ -s "${NVM_DIR}/nvm.sh" ] && source "${NVM_DIR}/nvm.sh" if [ "${NODE_VERSION}" != "" ]; then nvm alias default "${NODE_VERSION}" fi @@ -204,6 +354,9 @@ else fi fi +# Possibly install yarn (puts yarn in per-Node install on RHEL, uses system yarn on Debian) +install_yarn + # Additional node versions to be installed but not be set as # default we can assume the nvm is the group owner of the nvm # directory and the sticky bit on directories so any installed @@ -214,6 +367,8 @@ if [ ! -z "${ADDITIONAL_VERSIONS}" ]; then read -a additional_versions <<< "$ADDITIONAL_VERSIONS" for ver in "${additional_versions[@]}"; do su ${USERNAME} -c "umask 0002 && . 
'$NVM_DIR/nvm.sh' && nvm install '${ver}'" + # possibly install yarn (puts yarn in per-Node install on RHEL, uses system yarn on Debian) + install_yarn "${ver}" done # Ensure $NODE_VERSION is on the $PATH @@ -224,11 +379,17 @@ if [ ! -z "${ADDITIONAL_VERSIONS}" ]; then fi # Install pnpm -if type pnpm > /dev/null 2>&1; then - echo "pnpm already installed." +if [ ! -z "${PNPM_VERSION}" ] && [ "${PNPM_VERSION}" = "none" ]; then + echo "Ignoring installation of PNPM" else - if type npm > /dev/null 2>&1; then - npm install -g pnpm + if bash -c ". '${NVM_DIR}/nvm.sh' && type npm >/dev/null 2>&1"; then + ( + . "${NVM_DIR}/nvm.sh" + [ ! -z "$http_proxy" ] && npm set proxy="$http_proxy" + [ ! -z "$https_proxy" ] && npm set https-proxy="$https_proxy" + [ ! -z "$no_proxy" ] && npm set noproxy="$no_proxy" + npm install -g pnpm@$PNPM_VERSION --force + ) else echo "Skip installing pnpm because npm is missing" fi @@ -245,21 +406,29 @@ if [ "${INSTALL_TOOLS_FOR_NODE_GYP}" = "true" ]; then to_install="${to_install} gcc" fi if ! type g++ > /dev/null 2>&1; then - to_install="${to_install} g++" + if [ ${ADJUSTED_ID} = "debian" ]; then + to_install="${to_install} g++" + elif [ ${ADJUSTED_ID} = "rhel" ]; then + to_install="${to_install} gcc-c++" + fi fi if ! type python3 > /dev/null 2>&1; then - to_install="${to_install} python3-minimal" + if [ ${ADJUSTED_ID} = "debian" ]; then + to_install="${to_install} python3-minimal" + elif [ ${ADJUSTED_ID} = "rhel" ]; then + to_install="${to_install} python3" + fi fi if [ ! -z "${to_install}" ]; then - apt_get_update - apt-get -y install ${to_install} + pkg_mgr_update + check_packages ${to_install} fi fi # Clean up su ${USERNAME} -c "umask 0002 && . '$NVM_DIR/nvm.sh' && nvm clear-cache" -rm -rf /var/lib/apt/lists/* +clean_up # Ensure privs are correct for installed node versions. 
Unfortunately the # way nvm installs node versions pulls privs from the tar which does not diff --git a/src/nvidia-cuda/README.md b/src/nvidia-cuda/README.md index 7a0343de4..7982cfc9e 100644 --- a/src/nvidia-cuda/README.md +++ b/src/nvidia-cuda/README.md @@ -7,7 +7,7 @@ Installs shared libraries for NVIDIA CUDA. ```json "features": { - "ghcr.io/devcontainers/features/nvidia-cuda:1": {} + "ghcr.io/devcontainers/features/nvidia-cuda:2": {} } ``` @@ -16,9 +16,11 @@ Installs shared libraries for NVIDIA CUDA. | Options Id | Description | Type | Default Value | |-----|-----|-----|-----| | installCudnn | Additionally install CUDA Deep Neural Network (cuDNN) shared library | boolean | false | +| installCudnnDev | Additionally install CUDA Deep Neural Network (cuDNN) development libraries and headers | boolean | false | | installNvtx | Additionally install NVIDIA Tools Extension (NVTX) | boolean | false | +| installToolkit | Additionally install NVIDIA CUDA Toolkit | boolean | false | | cudaVersion | Version of CUDA to install | string | 11.8 | -| cudnnVersion | Version of cuDNN to install | string | 8.6.0.163 | +| cudnnVersion | Version of cuDNN to install | string | automatic | ## Compatibility diff --git a/src/nvidia-cuda/devcontainer-feature.json b/src/nvidia-cuda/devcontainer-feature.json index 81e1dff8d..7dd46f7c0 100644 --- a/src/nvidia-cuda/devcontainer-feature.json +++ b/src/nvidia-cuda/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "nvidia-cuda", - "version": "1.0.7", + "version": "2.0.0", "name": "NVIDIA CUDA", "description": "Installs shared libraries for NVIDIA CUDA.", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/nvidia-cuda", @@ -10,14 +10,30 @@ "default": false, "description": "Additionally install CUDA Deep Neural Network (cuDNN) shared library" }, + "installCudnnDev": { + "type": "boolean", + "default": false, + "description": "Additionally install CUDA Deep Neural Network (cuDNN) development libraries and headers" + }, 
"installNvtx": { "type": "boolean", "default": false, "description": "Additionally install NVIDIA Tools Extension (NVTX)" }, + "installToolkit": { + "type": "boolean", + "default": false, + "description": "Additionally install NVIDIA CUDA Toolkit" + }, "cudaVersion": { "type": "string", "proposals": [ + "12.5", + "12.4", + "12.3", + "12.2", + "12.1", + "12.0", "11.8", "11.7", "11.6", @@ -32,6 +48,16 @@ "cudnnVersion": { "type": "string", "proposals": [ + "automatic", + "8.9.5.29", + "8.9.4.25", + "8.9.3.28", + "8.9.2.26", + "8.9.1.23", + "8.9.0.131", + "8.8.1.3", + "8.8.0.121", + "8.7.0.84", "8.6.0.163", "8.5.0.96", "8.4.1.50", @@ -45,12 +71,30 @@ "8.2.1.32", "8.2.0.53", "8.1.1.33", - "8.1.0.77" + "8.1.0.77", + "9.0.0.312", + "9.1.0.70", + "9.1.1.17", + "9.2.0.82", + "9.2.1.18", + "9.3.0.75", + "9.4.0.58" ], - "default": "8.6.0.163", + "default": "automatic", "description": "Version of cuDNN to install" } }, + "customizations": { + "vscode": { + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes shared libraries for NVIDIA CUDA pre-installed and available on the `PATH`. It's only useful for dev containers that run on a host machine with an NVIDIA GPU. Within your dev container, use the `nvidia-smi` command to ensure that your GPU is available for CUDA. If the `nvidia-smi` command is not available, you may need to follow NVIDIA's instructions to install the NVIDIA Container Toolkit on your host machine." + } + ] + } + } + }, "installsAfter": [ "ghcr.io/devcontainers/features/common-utils" ] diff --git a/src/nvidia-cuda/install.sh b/src/nvidia-cuda/install.sh index f7db18e5e..6de935540 100644 --- a/src/nvidia-cuda/install.sh +++ b/src/nvidia-cuda/install.sh @@ -6,10 +6,14 @@ set -e rm -rf /var/lib/apt/lists/* INSTALL_CUDNN=${INSTALLCUDNN} +INSTALL_CUDNNDEV=${INSTALLCUDNNDEV} INSTALL_NVTX=${INSTALLNVTX} +INSTALL_TOOLKIT=${INSTALLTOOLKIT} CUDA_VERSION=${CUDAVERSION} CUDNN_VERSION=${CUDNNVERSION} +. 
/etc/os-release + if [ "$(id -u)" -ne 0 ]; then echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' exit 1 @@ -31,11 +35,33 @@ check_packages() { fi } +if [ $VERSION_CODENAME = "bookworm" ] || [ $VERSION_CODENAME = "jammy" ] && [ $CUDA_VERSION \< 11.7 ]; then + echo "(!) Unsupported distribution version '${VERSION_CODENAME}' for CUDA < 11.7" + exit 1 +fi + +export DEBIAN_FRONTEND=noninteractive + check_packages wget ca-certificates +# Determine system architecture and set NVIDIA repository URL accordingly +ARCH=$(uname -m) +case $ARCH in + x86_64) + NVIDIA_ARCH="x86_64" + ;; + aarch64 | arm64) + NVIDIA_ARCH="arm64" + ;; + *) + echo "Unsupported architecture: $ARCH" + exit 1 + ;; +esac + # Add NVIDIA's package repository to apt so that we can download packages -# Always use the ubuntu2004 repo because the other repos (e.g., debian11) are missing packages -NVIDIA_REPO_URL="https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64" +# Updating the repo to ubuntu2204 as ubuntu 20.04 is going out of support. +NVIDIA_REPO_URL="https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/$NVIDIA_ARCH" KEYRING_PACKAGE="cuda-keyring_1.0-1_all.deb" KEYRING_PACKAGE_URL="$NVIDIA_REPO_URL/$KEYRING_PACKAGE" KEYRING_PACKAGE_PATH="$(mktemp -d)" @@ -47,19 +73,47 @@ apt-get update -yq # Ensure that the requested version of CUDA is available cuda_pkg="cuda-libraries-${CUDA_VERSION/./-}" nvtx_pkg="cuda-nvtx-${CUDA_VERSION/./-}" +toolkit_pkg="cuda-toolkit-${CUDA_VERSION/./-}" if ! apt-cache show "$cuda_pkg"; then echo "The requested version of CUDA is not available: CUDA $CUDA_VERSION" + if [ "$NVIDIA_ARCH" = "arm64" ]; then + echo "Note: arm64 supports limited CUDA versions. Please check available versions:" + echo "https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/arm64" + fi exit 1 fi echo "Installing CUDA libraries..." 
apt-get install -yq "$cuda_pkg" +apt-get update -yq --fix-missing + +# auto find recent cudnn version +major_cuda_version=$(echo "${CUDA_VERSION}" | cut -d '.' -f 1) +if [ "$CUDNN_VERSION" = "automatic" ]; then + if [[ "$CUDA_VERSION" < "12.3" ]]; then + CUDNN_VERSION=$(apt-cache policy libcudnn8 | grep "$CUDA_VERSION" | grep -Eo '^[^-1+]*' | sort -V | tail -n1 | xargs) + else + CUDNN_VERSION=$(apt-cache policy libcudnn9-cuda-$major_cuda_version | grep "Candidate" | awk '{print $2}' | grep -Eo '^[^-+]*') + fi +fi +major_cudnn_version=$(echo "${CUDNN_VERSION}" | cut -d '.' -f 1) if [ "$INSTALL_CUDNN" = "true" ]; then # Ensure that the requested version of cuDNN is available AND compatible - cudnn_pkg_version="libcudnn8=${CUDNN_VERSION}-1+cuda${CUDA_VERSION}" + #if major cudnn version is 9, then we need to install libcudnn9-cuda-_-1 package + #else we need to install libcudnn8_-1+cuda" package + if [[ $major_cudnn_version -ge "9" ]] + then + cudnn_pkg_version="libcudnn9-cuda-${major_cuda_version}=${CUDNN_VERSION}-1" + else + cudnn_pkg_version="libcudnn8=${CUDNN_VERSION}-1+cuda${CUDA_VERSION}" + fi + if ! apt-cache show "$cudnn_pkg_version"; then echo "The requested version of cuDNN is not available: cuDNN $CUDNN_VERSION for CUDA $CUDA_VERSION" + if [ "$NVIDIA_ARCH" = "arm64" ]; then + echo "Note: arm64 has limited cuDNN package availability" + fi exit 1 fi @@ -67,11 +121,38 @@ if [ "$INSTALL_CUDNN" = "true" ]; then apt-get install -yq "$cudnn_pkg_version" fi +if [ "$INSTALL_CUDNNDEV" = "true" ]; then + # Ensure that the requested version of cuDNN development package is available AND compatible + #if major cudnn version is 9, then we need to install libcudnn9-dev-cuda-_-1 package + #else we need to install libcudnn8-dev_-1+cuda" package + if [[ $major_cudnn_version -ge "9" ]] + then + cudnn_dev_pkg_version="libcudnn9-dev-cuda-${major_cuda_version}=${CUDNN_VERSION}-1" + else + cudnn_dev_pkg_version="libcudnn8-dev=${CUDNN_VERSION}-1+cuda${CUDA_VERSION}" + fi + if ! 
apt-cache show "$cudnn_dev_pkg_version"; then + echo "The requested version of cuDNN development package is not available: cuDNN $CUDNN_VERSION for CUDA $CUDA_VERSION" + if [ "$NVIDIA_ARCH" = "arm64" ]; then + echo "Note: arm64 has limited cuDNN development package availability" + fi + exit 1 + fi + + echo "Installing cuDNN dev libraries..." + apt-get install -yq "$cudnn_dev_pkg_version" +fi + if [ "$INSTALL_NVTX" = "true" ]; then echo "Installing NVTX..." apt-get install -yq "$nvtx_pkg" fi +if [ "$INSTALL_TOOLKIT" = "true" ]; then + echo "Installing CUDA Toolkit..." + apt-get install -yq "$toolkit_pkg" +fi + # Clean up rm -rf /var/lib/apt/lists/* diff --git a/src/oryx/devcontainer-feature.json b/src/oryx/devcontainer-feature.json index 66a528ec5..860a39003 100644 --- a/src/oryx/devcontainer-feature.json +++ b/src/oryx/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "oryx", - "version": "1.1.0", + "version": "1.4.1", "name": "Oryx", "description": "Installs the oryx CLI", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/oryx", @@ -13,6 +13,17 @@ "DEBIAN_FLAVOR": "focal-scm", "PATH": "/usr/local/oryx:${PATH}" }, + "customizations": { + "vscode": { + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes the oryx CLI pre-installed and available on the `PATH`." 
+ } + ] + } + } + }, "installsAfter": [ "ghcr.io/devcontainers/features/common-utils", "ghcr.io/devcontainers/features/dotnet" diff --git a/src/oryx/install.sh b/src/oryx/install.sh index cb2e1b6b7..cf67db6b1 100755 --- a/src/oryx/install.sh +++ b/src/oryx/install.sh @@ -70,11 +70,29 @@ check_packages() { fi } +install_dotnet_with_script() +{ + local version="$1" + CURRENT_DIR=$(dirname "${BASH_SOURCE[0]}") + DOTNET_INSTALL_SCRIPT="$CURRENT_DIR/scripts/vendor/dotnet-install.sh" + DOTNET_INSTALL_DIR='/usr/share/dotnet' + + check_packages icu-devtools + + "$DOTNET_INSTALL_SCRIPT" \ + --version "$version" \ + --install-dir "$DOTNET_INSTALL_DIR" \ + --no-path + + DOTNET_BINARY="dotnet" + export PATH="${PATH}:/usr/share/dotnet" +} + install_dotnet_using_apt() { echo "Attempting to auto-install dotnet..." install_from_microsoft_feed=false apt_get_update - DOTNET_INSTALLATION_PACKAGE="dotnet7" + DOTNET_INSTALLATION_PACKAGE="dotnet8" apt-get -yq install $DOTNET_INSTALLATION_PACKAGE || install_from_microsoft_feed="true" if [ "${install_from_microsoft_feed}" = "true" ]; then @@ -82,10 +100,11 @@ install_dotnet_using_apt() { curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list apt-get update -y - DOTNET_INSTALLATION_PACKAGE="dotnet-sdk-7.0" + DOTNET_INSTALLATION_PACKAGE="dotnet-sdk-8.0" DOTNET_SKIP_FIRST_TIME_EXPERIENCE="true" apt-get install -yq $DOTNET_INSTALLATION_PACKAGE fi + DOTNET_BINARY="/usr/bin/dotnet" echo -e "Finished attempt to install dotnet. 
Sdks installed:\n" dotnet --list-sdks @@ -130,19 +149,25 @@ DOTNET_BINARY="" if dotnet --version > /dev/null ; then DOTNET_BINARY=$(which dotnet) + RUNTIME_VERSIONS=$(dotnet --list-runtimes | awk '{print $2}' | sort | uniq) fi -# Oryx needs to be built with .NET 7 -if [[ "${DOTNET_BINARY}" = "" ]] || [[ "$(dotnet --version)" != *"7"* ]] ; then - echo "'dotnet 7' was not detected. Attempting to install .NET 7 to build oryx." - install_dotnet_using_apt - +MAJOR_VERSION_ID=$(echo $(dotnet --version) | cut -d . -f 1) +PATCH_VERSION_ID=$(echo $(dotnet --version) | cut -d . -f 3) + +PINNED_SDK_VERSION="" +# Oryx needs to be built with .NET 8 +if [[ "${DOTNET_BINARY}" = "" ]] || [[ $MAJOR_VERSION_ID != "8" ]] || [[ $MAJOR_VERSION_ID = "8" && ${PATCH_VERSION_ID} -ne "202" ]] ; then + echo "'dotnet 8' was not detected. Attempting to install .NET 8 to build oryx." + # The oryx build fails with .Net 8.0.201, see https://github.com/devcontainers/images/issues/974 + # Pinning it to a working version until the upstream Oryx repo updates the dependency + # install_dotnet_using_apt + PINNED_SDK_VERSION="8.0.202" + install_dotnet_with_script ${PINNED_SDK_VERSION} if ! dotnet --version > /dev/null ; then echo "(!) Please install Dotnet before installing Oryx" exit 1 fi - - DOTNET_BINARY="/usr/bin/dotnet" fi BUILD_SCRIPT_GENERATOR=/usr/local/buildscriptgen @@ -154,14 +179,19 @@ mkdir -p ${ORYX} git clone --depth=1 https://github.com/microsoft/Oryx $GIT_ORYX +if [[ "${PINNED_SDK_VERSION}" != "" ]]; then + cd $GIT_ORYX + dotnet new globaljson --sdk-version ${PINNED_SDK_VERSION} +fi + SOLUTION_FILE_NAME="Oryx.sln" echo "Building solution '$SOLUTION_FILE_NAME'..." 
cd $GIT_ORYX ${DOTNET_BINARY} build "$SOLUTION_FILE_NAME" -c Debug -${DOTNET_BINARY} publish -property:ValidateExecutableReferencesMatchSelfContained=false -r linux-x64 -o ${BUILD_SCRIPT_GENERATOR} -c Release $GIT_ORYX/src/BuildScriptGeneratorCli/BuildScriptGeneratorCli.csproj -${DOTNET_BINARY} publish -r linux-x64 -o ${BUILD_SCRIPT_GENERATOR} -c Release $GIT_ORYX/src/BuildServer/BuildServer.csproj +${DOTNET_BINARY} publish -property:ValidateExecutableReferencesMatchSelfContained=false -r linux-x64 -o ${BUILD_SCRIPT_GENERATOR} -c Release $GIT_ORYX/src/BuildScriptGeneratorCli/BuildScriptGeneratorCli.csproj --self-contained true +${DOTNET_BINARY} publish -r linux-x64 -o ${BUILD_SCRIPT_GENERATOR} -c Release $GIT_ORYX/src/BuildServer/BuildServer.csproj --self-contained true chmod a+x ${BUILD_SCRIPT_GENERATOR}/GenerateBuildScript @@ -201,7 +231,22 @@ if [[ "${DOTNET_INSTALLATION_PACKAGE}" != "" ]]; then apt purge -yq $DOTNET_INSTALLATION_PACKAGE fi +if [[ "${PINNED_SDK_VERSION}" != "" ]]; then + rm -f ${GIT_ORYX}/global.json + rm -rf /usr/share/dotnet/sdk/$PINNED_SDK_VERSION + NEW_RUNTIME_VERSIONS=$(dotnet --list-runtimes | awk '{print $2}' | sort | uniq) + if [ -n "${RUNTIME_VERSIONS:-}" ]; then + SDK_INSTALLED_RUNTIME=$(echo "$NEW_RUNTIME_VERSIONS" | grep -vxFf <(echo "$RUNTIME_VERSIONS")) + else + SDK_INSTALLED_RUNTIME="$NEW_RUNTIME_VERSIONS" + fi + rm -rf /usr/share/dotnet/shared/Microsoft.NETCore.App/$SDK_INSTALLED_RUNTIME + rm -rf /usr/share/dotnet/shared/Microsoft.AspNetCore.App/$SDK_INSTALLED_RUNTIME + rm -rf /usr/share/dotnet/templates/$SDK_INSTALLED_RUNTIME +fi + + # Clean up rm -rf /var/lib/apt/lists/* -echo "Done!" +echo "Done!" 
\ No newline at end of file diff --git a/src/oryx/scripts/vendor/README.md b/src/oryx/scripts/vendor/README.md new file mode 100644 index 000000000..181b53781 --- /dev/null +++ b/src/oryx/scripts/vendor/README.md @@ -0,0 +1,27 @@ +### **IMPORTANT NOTE** + +Scripts in this directory are sourced externally and not maintained by the Dev Container spec maintainers. Do not make changes directly as they might be overwritten at any moment. + +## dotnet-install.sh + +`dotnet-install.sh` is a copy of . ([Script reference](https://learn.microsoft.com/en-us/dotnet/core/tools/dotnet-install-script)) + +Quick options reminder for `dotnet-install.sh`: + +- `--version`: `"latest"` (default) or an exact version in the form A.B.C like `"6.0.413"` +- `--channel`: `"LTS"` (default), `"STS"`, a two-part version in the form A.B like `"6.0"` or three-part form A.B.Cxx like `"6.0.1xx"` +- `--quality`: `"daily"`, `"preview"` or `"GA"` +- The channel option is only used when version is 'latest' because an exact version overrides the channel option +- The quality option is only used when channel is 'A.B' or 'A.B.Cxx' because it can't be used with STS or LTS + +Examples + +``` +dotnet-install.sh [--version latest] [--channel LTS] +dotnet-install.sh [--version latest] --channel STS +dotnet-install.sh [--version latest] --channel 6.0 [--quality GA] +dotnet-install.sh [--version latest] --channel 6.0.4xx [--quality GA] +dotnet-install.sh [--version latest] --channel 8.0 --quality preview +dotnet-install.sh [--version latest] --channel 8.0 --quality daily +dotnet-install.sh --version 6.0.413 +``` \ No newline at end of file diff --git a/src/oryx/scripts/vendor/dotnet-install.sh b/src/oryx/scripts/vendor/dotnet-install.sh new file mode 100755 index 000000000..122ee68ed --- /dev/null +++ b/src/oryx/scripts/vendor/dotnet-install.sh @@ -0,0 +1,1959 @@ +#!/usr/bin/env bash +# Copyright (c) .NET Foundation and contributors. All rights reserved. +# Licensed under the MIT license. 
See LICENSE file in the project root for full license information. +# + +# Stop script on NZEC +set -e +# Stop script if unbound variable found (use ${var:-} if intentional) +set -u +# By default cmd1 | cmd2 returns exit code of cmd2 regardless of cmd1 success +# This is causing it to fail +set -o pipefail + +# Use in the the functions: eval $invocation +invocation='say_verbose "Calling: ${yellow:-}${FUNCNAME[0]} ${green:-}$*${normal:-}"' + +# standard output may be used as a return value in the functions +# we need a way to write text on the screen in the functions so that +# it won't interfere with the return value. +# Exposing stream 3 as a pipe to standard output of the script itself +exec 3>&1 + +# Setup some colors to use. These need to work in fairly limited shells, like the Ubuntu Docker container where there are only 8 colors. +# See if stdout is a terminal +if [ -t 1 ] && command -v tput > /dev/null; then + # see if it supports colors + ncolors=$(tput colors || echo 0) + if [ -n "$ncolors" ] && [ $ncolors -ge 8 ]; then + bold="$(tput bold || echo)" + normal="$(tput sgr0 || echo)" + black="$(tput setaf 0 || echo)" + red="$(tput setaf 1 || echo)" + green="$(tput setaf 2 || echo)" + yellow="$(tput setaf 3 || echo)" + blue="$(tput setaf 4 || echo)" + magenta="$(tput setaf 5 || echo)" + cyan="$(tput setaf 6 || echo)" + white="$(tput setaf 7 || echo)" + fi +fi + +say_warning() { + printf "%b\n" "${yellow:-}dotnet_install: Warning: $1${normal:-}" >&3 +} + +say_err() { + printf "%b\n" "${red:-}dotnet_install: Error: $1${normal:-}" >&2 +} + +say() { + # using stream 3 (defined in the beginning) to not interfere with stdout of functions + # which may be used as return value + printf "%b\n" "${cyan:-}dotnet-install:${normal:-} $1" >&3 +} + +say_verbose() { + if [ "$verbose" = true ]; then + say "$1" + fi +} + +# This platform list is finite - if the SDK/Runtime has supported Linux distribution-specific assets, +# then and only then should the Linux distribution 
appear in this list. +# Adding a Linux distribution to this list does not imply distribution-specific support. +get_legacy_os_name_from_platform() { + eval $invocation + + platform="$1" + case "$platform" in + "centos.7") + echo "centos" + return 0 + ;; + "debian.8") + echo "debian" + return 0 + ;; + "debian.9") + echo "debian.9" + return 0 + ;; + "fedora.23") + echo "fedora.23" + return 0 + ;; + "fedora.24") + echo "fedora.24" + return 0 + ;; + "fedora.27") + echo "fedora.27" + return 0 + ;; + "fedora.28") + echo "fedora.28" + return 0 + ;; + "opensuse.13.2") + echo "opensuse.13.2" + return 0 + ;; + "opensuse.42.1") + echo "opensuse.42.1" + return 0 + ;; + "opensuse.42.3") + echo "opensuse.42.3" + return 0 + ;; + "rhel.7"*) + echo "rhel" + return 0 + ;; + "ubuntu.14.04") + echo "ubuntu" + return 0 + ;; + "ubuntu.16.04") + echo "ubuntu.16.04" + return 0 + ;; + "ubuntu.16.10") + echo "ubuntu.16.10" + return 0 + ;; + "ubuntu.18.04") + echo "ubuntu.18.04" + return 0 + ;; + "alpine.3.4.3") + echo "alpine" + return 0 + ;; + esac + return 1 +} + +get_legacy_os_name() { + eval $invocation + + local uname=$(uname) + if [ "$uname" = "Darwin" ]; then + echo "osx" + return 0 + elif [ -n "$runtime_id" ]; then + echo $(get_legacy_os_name_from_platform "${runtime_id%-*}" || echo "${runtime_id%-*}") + return 0 + else + if [ -e /etc/os-release ]; then + . /etc/os-release + os=$(get_legacy_os_name_from_platform "$ID${VERSION_ID:+.${VERSION_ID}}" || echo "") + if [ -n "$os" ]; then + echo "$os" + return 0 + fi + fi + fi + + say_verbose "Distribution specific OS name and version could not be detected: UName = $uname" + return 1 +} + +get_linux_platform_name() { + eval $invocation + + if [ -n "$runtime_id" ]; then + echo "${runtime_id%-*}" + return 0 + else + if [ -e /etc/os-release ]; then + . 
/etc/os-release + echo "$ID${VERSION_ID:+.${VERSION_ID}}" + return 0 + elif [ -e /etc/redhat-release ]; then + local redhatRelease=$(&1 || true) | grep -q musl +} + +get_current_os_name() { + eval $invocation + + local uname=$(uname) + if [ "$uname" = "Darwin" ]; then + echo "osx" + return 0 + elif [ "$uname" = "FreeBSD" ]; then + echo "freebsd" + return 0 + elif [ "$uname" = "Linux" ]; then + local linux_platform_name="" + linux_platform_name="$(get_linux_platform_name)" || true + + if [ "$linux_platform_name" = "rhel.6" ]; then + echo $linux_platform_name + return 0 + elif is_musl_based_distro; then + echo "linux-musl" + return 0 + elif [ "$linux_platform_name" = "linux-musl" ]; then + echo "linux-musl" + return 0 + else + echo "linux" + return 0 + fi + fi + + say_err "OS name could not be detected: UName = $uname" + return 1 +} + +machine_has() { + eval $invocation + + command -v "$1" > /dev/null 2>&1 + return $? +} + +check_min_reqs() { + local hasMinimum=false + if machine_has "curl"; then + hasMinimum=true + elif machine_has "wget"; then + hasMinimum=true + fi + + if [ "$hasMinimum" = "false" ]; then + say_err "curl (recommended) or wget are required to download dotnet. Install missing prerequisite to proceed." + return 1 + fi + return 0 +} + +# args: +# input - $1 +to_lowercase() { + #eval $invocation + + echo "$1" | tr '[:upper:]' '[:lower:]' + return 0 +} + +# args: +# input - $1 +remove_trailing_slash() { + #eval $invocation + + local input="${1:-}" + echo "${input%/}" + return 0 +} + +# args: +# input - $1 +remove_beginning_slash() { + #eval $invocation + + local input="${1:-}" + echo "${input#/}" + return 0 +} + +# args: +# root_path - $1 +# child_path - $2 - this parameter can be empty +combine_paths() { + eval $invocation + + # TODO: Consider making it work with any number of paths. For now: + if [ ! -z "${3:-}" ]; then + say_err "combine_paths: Function takes two parameters." 
+ return 1 + fi + + local root_path="$(remove_trailing_slash "$1")" + local child_path="$(remove_beginning_slash "${2:-}")" + say_verbose "combine_paths: root_path=$root_path" + say_verbose "combine_paths: child_path=$child_path" + echo "$root_path/$child_path" + return 0 +} + +get_machine_architecture() { + eval $invocation + + if command -v uname > /dev/null; then + CPUName=$(uname -m) + case $CPUName in + armv1*|armv2*|armv3*|armv4*|armv5*|armv6*) + echo "armv6-or-below" + return 0 + ;; + armv*l) + echo "arm" + return 0 + ;; + aarch64|arm64) + if [ "$(getconf LONG_BIT)" -lt 64 ]; then + # This is 32-bit OS running on 64-bit CPU (for example Raspberry Pi OS) + echo "arm" + return 0 + fi + echo "arm64" + return 0 + ;; + s390x) + echo "s390x" + return 0 + ;; + ppc64le) + echo "ppc64le" + return 0 + ;; + loongarch64) + echo "loongarch64" + return 0 + ;; + riscv64) + echo "riscv64" + return 0 + ;; + powerpc|ppc) + echo "ppc" + return 0 + ;; + esac + fi + + # Always default to 'x64' + echo "x64" + return 0 +} + +# args: +# architecture - $1 +get_normalized_architecture_from_architecture() { + eval $invocation + + local architecture="$(to_lowercase "$1")" + + if [[ $architecture == \ ]]; then + machine_architecture="$(get_machine_architecture)" + if [[ "$machine_architecture" == "armv6-or-below" ]]; then + say_err "Architecture \`$machine_architecture\` not supported. If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues" + return 1 + fi + + echo $machine_architecture + return 0 + fi + + case "$architecture" in + amd64|x64) + echo "x64" + return 0 + ;; + arm) + echo "arm" + return 0 + ;; + arm64) + echo "arm64" + return 0 + ;; + s390x) + echo "s390x" + return 0 + ;; + ppc64le) + echo "ppc64le" + return 0 + ;; + loongarch64) + echo "loongarch64" + return 0 + ;; + esac + + say_err "Architecture \`$architecture\` not supported. 
If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues" + return 1 +} + +# args: +# version - $1 +# channel - $2 +# architecture - $3 +get_normalized_architecture_for_specific_sdk_version() { + eval $invocation + + local is_version_support_arm64="$(is_arm64_supported "$1")" + local is_channel_support_arm64="$(is_arm64_supported "$2")" + local architecture="$3"; + local osname="$(get_current_os_name)" + + if [ "$osname" == "osx" ] && [ "$architecture" == "arm64" ] && { [ "$is_version_support_arm64" = false ] || [ "$is_channel_support_arm64" = false ]; }; then + #check if rosetta is installed + if [ "$(/usr/bin/pgrep oahd >/dev/null 2>&1;echo $?)" -eq 0 ]; then + say_verbose "Changing user architecture from '$architecture' to 'x64' because .NET SDKs prior to version 6.0 do not support arm64." + echo "x64" + return 0; + else + say_err "Architecture \`$architecture\` is not supported for .NET SDK version \`$version\`. Please install Rosetta to allow emulation of the \`$architecture\` .NET SDK on this platform" + return 1 + fi + fi + + echo "$architecture" + return 0 +} + +# args: +# version or channel - $1 +is_arm64_supported() { + # Extract the major version by splitting on the dot + major_version="${1%%.*}" + + # Check if the major version is a valid number and less than 6 + case "$major_version" in + [0-9]*) + if [ "$major_version" -lt 6 ]; then + echo false + return 0 + fi + ;; + esac + + echo true + return 0 +} + +# args: +# user_defined_os - $1 +get_normalized_os() { + eval $invocation + + local osname="$(to_lowercase "$1")" + if [ ! -z "$osname" ]; then + case "$osname" in + osx | freebsd | rhel.6 | linux-musl | linux) + echo "$osname" + return 0 + ;; + macos) + osname='osx' + echo "$osname" + return 0 + ;; + *) + say_err "'$user_defined_os' is not a supported value for --os option, supported values are: osx, macos, linux, linux-musl, freebsd, rhel.6. 
If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues." + return 1 + ;; + esac + else + osname="$(get_current_os_name)" || return 1 + fi + echo "$osname" + return 0 +} + +# args: +# quality - $1 +get_normalized_quality() { + eval $invocation + + local quality="$(to_lowercase "$1")" + if [ ! -z "$quality" ]; then + case "$quality" in + daily | signed | validated | preview) + echo "$quality" + return 0 + ;; + ga) + #ga quality is available without specifying quality, so normalizing it to empty + return 0 + ;; + *) + say_err "'$quality' is not a supported value for --quality option. Supported values are: daily, signed, validated, preview, ga. If you think this is a bug, report it at https://github.com/dotnet/install-scripts/issues." + return 1 + ;; + esac + fi + return 0 +} + +# args: +# channel - $1 +get_normalized_channel() { + eval $invocation + + local channel="$(to_lowercase "$1")" + + if [[ $channel == current ]]; then + say_warning 'Value "Current" is deprecated for -Channel option. Use "STS" instead.' + fi + + if [[ $channel == release/* ]]; then + say_warning 'Using branch name with -Channel option is no longer supported with newer releases. Use -Quality option with a channel in X.Y format instead.'; + fi + + if [ ! 
-z "$channel" ]; then + case "$channel" in + lts) + echo "LTS" + return 0 + ;; + sts) + echo "STS" + return 0 + ;; + current) + echo "STS" + return 0 + ;; + *) + echo "$channel" + return 0 + ;; + esac + fi + + return 0 +} + +# args: +# runtime - $1 +get_normalized_product() { + eval $invocation + + local product="" + local runtime="$(to_lowercase "$1")" + if [[ "$runtime" == "dotnet" ]]; then + product="dotnet-runtime" + elif [[ "$runtime" == "aspnetcore" ]]; then + product="aspnetcore-runtime" + elif [ -z "$runtime" ]; then + product="dotnet-sdk" + fi + echo "$product" + return 0 +} + +# The version text returned from the feeds is a 1-line or 2-line string: +# For the SDK and the dotnet runtime (2 lines): +# Line 1: # commit_hash +# Line 2: # 4-part version +# For the aspnetcore runtime (1 line): +# Line 1: # 4-part version + +# args: +# version_text - stdin +get_version_from_latestversion_file_content() { + eval $invocation + + cat | tail -n 1 | sed 's/\r$//' + return 0 +} + +# args: +# install_root - $1 +# relative_path_to_package - $2 +# specific_version - $3 +is_dotnet_package_installed() { + eval $invocation + + local install_root="$1" + local relative_path_to_package="$2" + local specific_version="${3//[$'\t\r\n']}" + + local dotnet_package_path="$(combine_paths "$(combine_paths "$install_root" "$relative_path_to_package")" "$specific_version")" + say_verbose "is_dotnet_package_installed: dotnet_package_path=$dotnet_package_path" + + if [ -d "$dotnet_package_path" ]; then + return 0 + else + return 1 + fi +} + +# args: +# downloaded file - $1 +# remote_file_size - $2 +validate_remote_local_file_sizes() +{ + eval $invocation + + local downloaded_file="$1" + local remote_file_size="$2" + local file_size='' + + if [[ "$OSTYPE" == "linux-gnu"* ]]; then + file_size="$(stat -c '%s' "$downloaded_file")" + elif [[ "$OSTYPE" == "darwin"* ]]; then + # hardcode in order to avoid conflicts with GNU stat + file_size="$(/usr/bin/stat -f '%z' "$downloaded_file")" + fi + + 
if [ -n "$file_size" ]; then + say "Downloaded file size is $file_size bytes." + + if [ -n "$remote_file_size" ] && [ -n "$file_size" ]; then + if [ "$remote_file_size" -ne "$file_size" ]; then + say "The remote and local file sizes are not equal. The remote file size is $remote_file_size bytes and the local size is $file_size bytes. The local package may be corrupted." + else + say "The remote and local file sizes are equal." + fi + fi + + else + say "Either downloaded or local package size can not be measured. One of them may be corrupted." + fi +} + +# args: +# azure_feed - $1 +# channel - $2 +# normalized_architecture - $3 +get_version_from_latestversion_file() { + eval $invocation + + local azure_feed="$1" + local channel="$2" + local normalized_architecture="$3" + + local version_file_url=null + if [[ "$runtime" == "dotnet" ]]; then + version_file_url="$azure_feed/Runtime/$channel/latest.version" + elif [[ "$runtime" == "aspnetcore" ]]; then + version_file_url="$azure_feed/aspnetcore/Runtime/$channel/latest.version" + elif [ -z "$runtime" ]; then + version_file_url="$azure_feed/Sdk/$channel/latest.version" + else + say_err "Invalid value for \$runtime" + return 1 + fi + say_verbose "get_version_from_latestversion_file: latest url: $version_file_url" + + download "$version_file_url" || return $? + return 0 +} + +# args: +# json_file - $1 +parse_globaljson_file_for_version() { + eval $invocation + + local json_file="$1" + if [ ! 
-f "$json_file" ]; then + say_err "Unable to find \`$json_file\`" + return 1 + fi + + sdk_section=$(cat $json_file | tr -d "\r" | awk '/"sdk"/,/}/') + if [ -z "$sdk_section" ]; then + say_err "Unable to parse the SDK node in \`$json_file\`" + return 1 + fi + + sdk_list=$(echo $sdk_section | awk -F"[{}]" '{print $2}') + sdk_list=${sdk_list//[\" ]/} + sdk_list=${sdk_list//,/$'\n'} + + local version_info="" + while read -r line; do + IFS=: + while read -r key value; do + if [[ "$key" == "version" ]]; then + version_info=$value + fi + done <<< "$line" + done <<< "$sdk_list" + if [ -z "$version_info" ]; then + say_err "Unable to find the SDK:version node in \`$json_file\`" + return 1 + fi + + unset IFS; + echo "$version_info" + return 0 +} + +# args: +# azure_feed - $1 +# channel - $2 +# normalized_architecture - $3 +# version - $4 +# json_file - $5 +get_specific_version_from_version() { + eval $invocation + + local azure_feed="$1" + local channel="$2" + local normalized_architecture="$3" + local version="$(to_lowercase "$4")" + local json_file="$5" + + if [ -z "$json_file" ]; then + if [[ "$version" == "latest" ]]; then + local version_info + version_info="$(get_version_from_latestversion_file "$azure_feed" "$channel" "$normalized_architecture" false)" || return 1 + say_verbose "get_specific_version_from_version: version_info=$version_info" + echo "$version_info" | get_version_from_latestversion_file_content + return 0 + else + echo "$version" + return 0 + fi + else + local version_info + version_info="$(parse_globaljson_file_for_version "$json_file")" || return 1 + echo "$version_info" + return 0 + fi +} + +# args: +# azure_feed - $1 +# channel - $2 +# normalized_architecture - $3 +# specific_version - $4 +# normalized_os - $5 +construct_download_link() { + eval $invocation + + local azure_feed="$1" + local channel="$2" + local normalized_architecture="$3" + local specific_version="${4//[$'\t\r\n']}" + local specific_product_version="$(get_specific_product_version 
"$1" "$4")" + local osname="$5" + + local download_link=null + if [[ "$runtime" == "dotnet" ]]; then + download_link="$azure_feed/Runtime/$specific_version/dotnet-runtime-$specific_product_version-$osname-$normalized_architecture.tar.gz" + elif [[ "$runtime" == "aspnetcore" ]]; then + download_link="$azure_feed/aspnetcore/Runtime/$specific_version/aspnetcore-runtime-$specific_product_version-$osname-$normalized_architecture.tar.gz" + elif [ -z "$runtime" ]; then + download_link="$azure_feed/Sdk/$specific_version/dotnet-sdk-$specific_product_version-$osname-$normalized_architecture.tar.gz" + else + return 1 + fi + + echo "$download_link" + return 0 +} + +# args: +# azure_feed - $1 +# specific_version - $2 +# download link - $3 (optional) +get_specific_product_version() { + # If we find a 'productVersion.txt' at the root of any folder, we'll use its contents + # to resolve the version of what's in the folder, superseding the specified version. + # if 'productVersion.txt' is missing but download link is already available, product version will be taken from download link + eval $invocation + + local azure_feed="$1" + local specific_version="${2//[$'\t\r\n']}" + local package_download_link="" + if [ $# -gt 2 ]; then + local package_download_link="$3" + fi + local specific_product_version=null + + # Try to get the version number, using the productVersion.txt file located next to the installer file. + local download_links=($(get_specific_product_version_url "$azure_feed" "$specific_version" true "$package_download_link") + $(get_specific_product_version_url "$azure_feed" "$specific_version" false "$package_download_link")) + + for download_link in "${download_links[@]}" + do + say_verbose "Checking for the existence of $download_link" + + if machine_has "curl" + then + if ! 
specific_product_version=$(curl -s --fail "${download_link}${feed_credential}" 2>&1); then + continue + else + echo "${specific_product_version//[$'\t\r\n']}" + return 0 + fi + + elif machine_has "wget" + then + specific_product_version=$(wget -qO- "${download_link}${feed_credential}" 2>&1) + if [ $? = 0 ]; then + echo "${specific_product_version//[$'\t\r\n']}" + return 0 + fi + fi + done + + # Getting the version number with productVersion.txt has failed. Try parsing the download link for a version number. + say_verbose "Failed to get the version using productVersion.txt file. Download link will be parsed instead." + specific_product_version="$(get_product_specific_version_from_download_link "$package_download_link" "$specific_version")" + echo "${specific_product_version//[$'\t\r\n']}" + return 0 +} + +# args: +# azure_feed - $1 +# specific_version - $2 +# is_flattened - $3 +# download link - $4 (optional) +get_specific_product_version_url() { + eval $invocation + + local azure_feed="$1" + local specific_version="$2" + local is_flattened="$3" + local package_download_link="" + if [ $# -gt 3 ]; then + local package_download_link="$4" + fi + + local pvFileName="productVersion.txt" + if [ "$is_flattened" = true ]; then + if [ -z "$runtime" ]; then + pvFileName="sdk-productVersion.txt" + elif [[ "$runtime" == "dotnet" ]]; then + pvFileName="runtime-productVersion.txt" + else + pvFileName="$runtime-productVersion.txt" + fi + fi + + local download_link=null + + if [ -z "$package_download_link" ]; then + if [[ "$runtime" == "dotnet" ]]; then + download_link="$azure_feed/Runtime/$specific_version/${pvFileName}" + elif [[ "$runtime" == "aspnetcore" ]]; then + download_link="$azure_feed/aspnetcore/Runtime/$specific_version/${pvFileName}" + elif [ -z "$runtime" ]; then + download_link="$azure_feed/Sdk/$specific_version/${pvFileName}" + else + return 1 + fi + else + download_link="${package_download_link%/*}/${pvFileName}" + fi + + say_verbose "Constructed productVersion 
link: $download_link" + echo "$download_link" + return 0 +} + +# args: +# download link - $1 +# specific version - $2 +get_product_specific_version_from_download_link() +{ + eval $invocation + + local download_link="$1" + local specific_version="$2" + local specific_product_version="" + + if [ -z "$download_link" ]; then + echo "$specific_version" + return 0 + fi + + #get filename + filename="${download_link##*/}" + + #product specific version follows the product name + #for filename 'dotnet-sdk-3.1.404-linux-x64.tar.gz': the product version is 3.1.404 + IFS='-' + read -ra filename_elems <<< "$filename" + count=${#filename_elems[@]} + if [[ "$count" -gt 2 ]]; then + specific_product_version="${filename_elems[2]}" + else + specific_product_version=$specific_version + fi + unset IFS; + echo "$specific_product_version" + return 0 +} + +# args: +# azure_feed - $1 +# channel - $2 +# normalized_architecture - $3 +# specific_version - $4 +construct_legacy_download_link() { + eval $invocation + + local azure_feed="$1" + local channel="$2" + local normalized_architecture="$3" + local specific_version="${4//[$'\t\r\n']}" + + local distro_specific_osname + distro_specific_osname="$(get_legacy_os_name)" || return 1 + + local legacy_download_link=null + if [[ "$runtime" == "dotnet" ]]; then + legacy_download_link="$azure_feed/Runtime/$specific_version/dotnet-$distro_specific_osname-$normalized_architecture.$specific_version.tar.gz" + elif [ -z "$runtime" ]; then + legacy_download_link="$azure_feed/Sdk/$specific_version/dotnet-dev-$distro_specific_osname-$normalized_architecture.$specific_version.tar.gz" + else + return 1 + fi + + echo "$legacy_download_link" + return 0 +} + +get_user_install_path() { + eval $invocation + + if [ ! 
-z "${DOTNET_INSTALL_DIR:-}" ]; then + echo "$DOTNET_INSTALL_DIR" + else + echo "$HOME/.dotnet" + fi + return 0 +} + +# args: +# install_dir - $1 +resolve_installation_path() { + eval $invocation + + local install_dir=$1 + if [ "$install_dir" = "" ]; then + local user_install_path="$(get_user_install_path)" + say_verbose "resolve_installation_path: user_install_path=$user_install_path" + echo "$user_install_path" + return 0 + fi + + echo "$install_dir" + return 0 +} + +# args: +# relative_or_absolute_path - $1 +get_absolute_path() { + eval $invocation + + local relative_or_absolute_path=$1 + echo "$(cd "$(dirname "$1")" && pwd -P)/$(basename "$1")" + return 0 +} + +# args: +# override - $1 (boolean, true or false) +get_cp_options() { + eval $invocation + + local override="$1" + local override_switch="" + + if [ "$override" = false ]; then + override_switch="-n" + + # create temporary files to check if 'cp -u' is supported + tmp_dir="$(mktemp -d)" + tmp_file="$tmp_dir/testfile" + tmp_file2="$tmp_dir/testfile2" + + touch "$tmp_file" + + # use -u instead of -n if it's available + if cp -u "$tmp_file" "$tmp_file2" 2>/dev/null; then + override_switch="-u" + fi + + # clean up + rm -f "$tmp_file" "$tmp_file2" + rm -rf "$tmp_dir" + fi + + echo "$override_switch" +} + +# args: +# input_files - stdin +# root_path - $1 +# out_path - $2 +# override - $3 +copy_files_or_dirs_from_list() { + eval $invocation + + local root_path="$(remove_trailing_slash "$1")" + local out_path="$(remove_trailing_slash "$2")" + local override="$3" + local override_switch="$(get_cp_options "$override")" + + cat | uniq | while read -r file_path; do + local path="$(remove_beginning_slash "${file_path#$root_path}")" + local target="$out_path/$path" + if [ "$override" = true ] || (! 
([ -d "$target" ] || [ -e "$target" ])); then + mkdir -p "$out_path/$(dirname "$path")" + if [ -d "$target" ]; then + rm -rf "$target" + fi + cp -R $override_switch "$root_path/$path" "$target" + fi + done +} + +# args: +# zip_uri - $1 +get_remote_file_size() { + local zip_uri="$1" + + if machine_has "curl"; then + file_size=$(curl -sI "$zip_uri" | grep -i content-length | awk '{ num = $2 + 0; print num }') + elif machine_has "wget"; then + file_size=$(wget --spider --server-response -O /dev/null "$zip_uri" 2>&1 | grep -i 'Content-Length:' | awk '{ num = $2 + 0; print num }') + else + say "Neither curl nor wget is available on this system." + return + fi + + if [ -n "$file_size" ]; then + say "Remote file $zip_uri size is $file_size bytes." + echo "$file_size" + else + say_verbose "Content-Length header was not extracted for $zip_uri." + echo "" + fi +} + +# args: +# zip_path - $1 +# out_path - $2 +# remote_file_size - $3 +extract_dotnet_package() { + eval $invocation + + local zip_path="$1" + local out_path="$2" + local remote_file_size="$3" + + local temp_out_path="$(mktemp -d "$temporary_file_template")" + + local failed=false + tar -xzf "$zip_path" -C "$temp_out_path" > /dev/null || failed=true + + local folders_with_version_regex='^.*/[0-9]+\.[0-9]+[^/]+/' + find "$temp_out_path" -type f | grep -Eo "$folders_with_version_regex" | sort | copy_files_or_dirs_from_list "$temp_out_path" "$out_path" false + find "$temp_out_path" -type f | grep -Ev "$folders_with_version_regex" | copy_files_or_dirs_from_list "$temp_out_path" "$out_path" "$override_non_versioned_files" + + validate_remote_local_file_sizes "$zip_path" "$remote_file_size" + + rm -rf "$temp_out_path" + if [ -z ${keep_zip+x} ]; then + rm -f "$zip_path" && say_verbose "Temporary archive file $zip_path was removed" + fi + + if [ "$failed" = true ]; then + say_err "Extraction failed" + return 1 + fi + return 0 +} + +# args: +# remote_path - $1 +# disable_feed_credential - $2 +get_http_header() +{ + eval 
$invocation + local remote_path="$1" + local disable_feed_credential="$2" + + local failed=false + local response + if machine_has "curl"; then + get_http_header_curl $remote_path $disable_feed_credential || failed=true + elif machine_has "wget"; then + get_http_header_wget $remote_path $disable_feed_credential || failed=true + else + failed=true + fi + if [ "$failed" = true ]; then + say_verbose "Failed to get HTTP header: '$remote_path'." + return 1 + fi + return 0 +} + +# args: +# remote_path - $1 +# disable_feed_credential - $2 +get_http_header_curl() { + eval $invocation + local remote_path="$1" + local disable_feed_credential="$2" + + remote_path_with_credential="$remote_path" + if [ "$disable_feed_credential" = false ]; then + remote_path_with_credential+="$feed_credential" + fi + + curl_options="-I -sSL --retry 5 --retry-delay 2 --connect-timeout 15 " + curl $curl_options "$remote_path_with_credential" 2>&1 || return 1 + return 0 +} + +# args: +# remote_path - $1 +# disable_feed_credential - $2 +get_http_header_wget() { + eval $invocation + local remote_path="$1" + local disable_feed_credential="$2" + local wget_options="-q -S --spider --tries 5 " + + local wget_options_extra='' + + # Test for options that aren't supported on all wget implementations. + if [[ $(wget -h 2>&1 | grep -E 'waitretry|connect-timeout') ]]; then + wget_options_extra="--waitretry 2 --connect-timeout 15 " + else + say "wget extra options are unavailable for this environment" + fi + + remote_path_with_credential="$remote_path" + if [ "$disable_feed_credential" = false ]; then + remote_path_with_credential+="$feed_credential" + fi + + wget $wget_options $wget_options_extra "$remote_path_with_credential" 2>&1 + + return $? +} + +# args: +# remote_path - $1 +# [out_path] - $2 - stdout if not provided +download() { + eval $invocation + + local remote_path="$1" + local out_path="${2:-}" + + if [[ "$remote_path" != "http"* ]]; then + cp "$remote_path" "$out_path" + return $? 
+ fi + + local failed=false + local attempts=0 + while [ $attempts -lt 3 ]; do + attempts=$((attempts+1)) + failed=false + if machine_has "curl"; then + downloadcurl "$remote_path" "$out_path" || failed=true + elif machine_has "wget"; then + downloadwget "$remote_path" "$out_path" || failed=true + else + say_err "Missing dependency: neither curl nor wget was found." + exit 1 + fi + + if [ "$failed" = false ] || [ $attempts -ge 3 ] || { [ ! -z $http_code ] && [ $http_code = "404" ]; }; then + break + fi + + say "Download attempt #$attempts has failed: $http_code $download_error_msg" + say "Attempt #$((attempts+1)) will start in $((attempts*10)) seconds." + sleep $((attempts*10)) + done + + if [ "$failed" = true ]; then + say_verbose "Download failed: $remote_path" + return 1 + fi + return 0 +} + +# Updates global variables $http_code and $download_error_msg +downloadcurl() { + eval $invocation + unset http_code + unset download_error_msg + local remote_path="$1" + local out_path="${2:-}" + # Append feed_credential as late as possible before calling curl to avoid logging feed_credential + # Avoid passing URI with credentials to functions: note, most of them echoing parameters of invocation in verbose output. + local remote_path_with_credential="${remote_path}${feed_credential}" + local curl_options="--retry 20 --retry-delay 2 --connect-timeout 15 -sSL -f --create-dirs " + local curl_exit_code=0; + if [ -z "$out_path" ]; then + curl $curl_options "$remote_path_with_credential" 2>&1 + curl_exit_code=$? + else + curl $curl_options -o "$out_path" "$remote_path_with_credential" 2>&1 + curl_exit_code=$? + fi + + if [ $curl_exit_code -gt 0 ]; then + download_error_msg="Unable to download $remote_path." + # Check for curl timeout codes + if [[ $curl_exit_code == 7 || $curl_exit_code == 28 ]]; then + download_error_msg+=" Failed to reach the server: connection timeout." 
+ else + local disable_feed_credential=false + local response=$(get_http_header_curl $remote_path $disable_feed_credential) + http_code=$( echo "$response" | awk '/^HTTP/{print $2}' | tail -1 ) + if [[ ! -z $http_code && $http_code != 2* ]]; then + download_error_msg+=" Returned HTTP status code: $http_code." + fi + fi + say_verbose "$download_error_msg" + return 1 + fi + return 0 +} + + +# Updates global variables $http_code and $download_error_msg +downloadwget() { + eval $invocation + unset http_code + unset download_error_msg + local remote_path="$1" + local out_path="${2:-}" + # Append feed_credential as late as possible before calling wget to avoid logging feed_credential + local remote_path_with_credential="${remote_path}${feed_credential}" + local wget_options="--tries 20 " + + local wget_options_extra='' + local wget_result='' + + # Test for options that aren't supported on all wget implementations. + if [[ $(wget -h 2>&1 | grep -E 'waitretry|connect-timeout') ]]; then + wget_options_extra="--waitretry 2 --connect-timeout 15 " + else + say "wget extra options are unavailable for this environment" + fi + + if [ -z "$out_path" ]; then + wget -q $wget_options $wget_options_extra -O - "$remote_path_with_credential" 2>&1 + wget_result=$? + else + wget $wget_options $wget_options_extra -O "$out_path" "$remote_path_with_credential" 2>&1 + wget_result=$? + fi + + if [[ $wget_result != 0 ]]; then + local disable_feed_credential=false + local response=$(get_http_header_wget $remote_path $disable_feed_credential) + http_code=$( echo "$response" | awk '/^ HTTP/{print $2}' | tail -1 ) + download_error_msg="Unable to download $remote_path." + if [[ ! -z $http_code && $http_code != 2* ]]; then + download_error_msg+=" Returned HTTP status code: $http_code." + # wget exit code 4 stands for network-issue + elif [[ $wget_result == 4 ]]; then + download_error_msg+=" Failed to reach the server: connection timeout." 
+ fi + say_verbose "$download_error_msg" + return 1 + fi + + return 0 +} + +extract_stem() { + local url="$1" + # extract the protocol + proto="$(echo $1 | grep :// | sed -e's,^\(.*://\).*,\1,g')" + # remove the protocol + url="${1/$proto/}" + # extract the path (if any) - since we know all of our feeds have a first path segment, we can skip the first one. otherwise we'd use -f2- to get the full path + full_path="$(echo $url | grep / | cut -d/ -f2-)" + path="$(echo $full_path | cut -d/ -f2-)" + echo $path +} + +check_url_exists() { + eval $invocation + local url="$1" + + local code="" + if machine_has "curl" + then + code=$(curl --head -o /dev/null -w "%{http_code}" -s --fail "$url"); + elif machine_has "wget" + then + # get the http response, grab the status code + server_response=$(wget -qO- --method=HEAD --server-response "$url" 2>&1) + code=$(echo "$server_response" | grep "HTTP/" | awk '{print $2}') + fi + if [ $code = "200" ]; then + return 0 + else + return 1 + fi +} + +sanitize_redirect_url() { + eval $invocation + + local url_stem + url_stem=$(extract_stem "$1") + say_verbose "Checking configured feeds for the asset at ${yellow:-}$url_stem${normal:-}" + + for feed in "${feeds[@]}" + do + local trial_url="$feed/$url_stem" + say_verbose "Checking ${yellow:-}$trial_url${normal:-}" + if check_url_exists "$trial_url"; then + say_verbose "Found a match at ${yellow:-}$trial_url${normal:-}" + echo "$trial_url" + return 0 + else + say_verbose "No match at ${yellow:-}$trial_url${normal:-}" + fi + done + return 1 +} + +get_download_link_from_aka_ms() { + eval $invocation + + #quality is not supported for LTS or STS channel + #STS maps to current + if [[ ! -z "$normalized_quality" && ("$normalized_channel" == "LTS" || "$normalized_channel" == "STS") ]]; then + normalized_quality="" + say_warning "Specifying quality for STS or LTS channel is not supported, the quality will be ignored." 
+ fi + + say_verbose "Retrieving primary payload URL from aka.ms for channel: '$normalized_channel', quality: '$normalized_quality', product: '$normalized_product', os: '$normalized_os', architecture: '$normalized_architecture'." + + #construct aka.ms link + aka_ms_link="https://aka.ms/dotnet" + if [ "$internal" = true ]; then + aka_ms_link="$aka_ms_link/internal" + fi + aka_ms_link="$aka_ms_link/$normalized_channel" + if [[ ! -z "$normalized_quality" ]]; then + aka_ms_link="$aka_ms_link/$normalized_quality" + fi + aka_ms_link="$aka_ms_link/$normalized_product-$normalized_os-$normalized_architecture.tar.gz" + say_verbose "Constructed aka.ms link: '$aka_ms_link'." + + #get HTTP response + #do not pass credentials as a part of the $aka_ms_link and do not apply credentials in the get_http_header function + #otherwise the redirect link would have credentials as well + #it would result in applying credentials twice to the resulting link and thus breaking it, and in echoing credentials to the output as a part of redirect link + disable_feed_credential=true + response="$(get_http_header $aka_ms_link $disable_feed_credential)" + + say_verbose "Received response: $response" + # Get results of all the redirects. + http_codes=$( echo "$response" | awk '$1 ~ /^HTTP/ {print $2}' ) + # They all need to be 301, otherwise some links are broken (except for the last, which is not a redirect but 200 or 404). + broken_redirects=$( echo "$http_codes" | sed '$d' | grep -v '301' ) + # The response may end without final code 2xx/4xx/5xx somehow, e.g. network restrictions on www.bing.com causes redirecting to bing.com fails with connection refused. + # In this case it should not exclude the last. + last_http_code=$( echo "$http_codes" | tail -n 1 ) + if ! [[ $last_http_code =~ ^(2|4|5)[0-9][0-9]$ ]]; then + broken_redirects=$( echo "$http_codes" | grep -v '301' ) + fi + + # All HTTP codes are 301 (Moved Permanently), the redirect link exists. 
+ if [[ -z "$broken_redirects" ]]; then + aka_ms_download_link=$( echo "$response" | awk '$1 ~ /^Location/{print $2}' | tail -1 | tr -d '\r') + + if [[ -z "$aka_ms_download_link" ]]; then + say_verbose "The aka.ms link '$aka_ms_link' is not valid: failed to get redirect location." + return 1 + fi + + sanitized_redirect_url=$(sanitize_redirect_url "$aka_ms_download_link") + if [[ -n "$sanitized_redirect_url" ]]; then + aka_ms_download_link="$sanitized_redirect_url" + fi + + say_verbose "The redirect location retrieved: '$aka_ms_download_link'." + return 0 + else + say_verbose "The aka.ms link '$aka_ms_link' is not valid: received HTTP code: $(echo "$broken_redirects" | paste -sd "," -)." + return 1 + fi +} + +get_feeds_to_use() +{ + feeds=( + "https://builds.dotnet.microsoft.com/dotnet" + "https://dotnetcli.azureedge.net/dotnet" + "https://ci.dot.net/public" + "https://dotnetbuilds.azureedge.net/public" + ) + + if [[ -n "$azure_feed" ]]; then + feeds=("$azure_feed") + fi + + if [[ "$no_cdn" == "true" ]]; then + feeds=( + "https://dotnetcli.blob.core.windows.net/dotnet" + "https://dotnetbuilds.blob.core.windows.net/public" + ) + + if [[ -n "$uncached_feed" ]]; then + feeds=("$uncached_feed") + fi + fi +} + +# THIS FUNCTION MAY EXIT (if the determined version is already installed). +generate_download_links() { + + download_links=() + specific_versions=() + effective_versions=() + link_types=() + + # If generate_akams_links returns false, no fallback to old links. Just terminate. + # This function may also 'exit' (if the determined version is already installed). + generate_akams_links || return + + # Check other feeds only if we haven't been able to find an aka.ms link. + if [[ "${#download_links[@]}" -lt 1 ]]; then + for feed in ${feeds[@]} + do + # generate_regular_links may also 'exit' (if the determined version is already installed). 
+ generate_regular_links $feed || return + done + fi + + if [[ "${#download_links[@]}" -eq 0 ]]; then + say_err "Failed to resolve the exact version number." + return 1 + fi + + say_verbose "Generated ${#download_links[@]} links." + for link_index in ${!download_links[@]} + do + say_verbose "Link $link_index: ${link_types[$link_index]}, ${effective_versions[$link_index]}, ${download_links[$link_index]}" + done +} + +# THIS FUNCTION MAY EXIT (if the determined version is already installed). +generate_akams_links() { + local valid_aka_ms_link=true; + + normalized_version="$(to_lowercase "$version")" + if [[ "$normalized_version" != "latest" ]] && [ -n "$normalized_quality" ]; then + say_err "Quality and Version options are not allowed to be specified simultaneously. See https://learn.microsoft.com/dotnet/core/tools/dotnet-install-script#options for details." + return 1 + fi + + if [[ -n "$json_file" || "$normalized_version" != "latest" ]]; then + # aka.ms links are not needed when exact version is specified via command or json file + return + fi + + get_download_link_from_aka_ms || valid_aka_ms_link=false + + if [[ "$valid_aka_ms_link" == true ]]; then + say_verbose "Retrieved primary payload URL from aka.ms link: '$aka_ms_download_link'." + say_verbose "Downloading using legacy url will not be attempted." + + download_link=$aka_ms_download_link + + #get version from the path + IFS='/' + read -ra pathElems <<< "$download_link" + count=${#pathElems[@]} + specific_version="${pathElems[count-2]}" + unset IFS; + say_verbose "Version: '$specific_version'." + + #Retrieve effective version + effective_version="$(get_specific_product_version "$azure_feed" "$specific_version" "$download_link")" + + # Add link info to arrays + download_links+=($download_link) + specific_versions+=($specific_version) + effective_versions+=($effective_version) + link_types+=("aka.ms") + + # Check if the SDK version is already installed. 
+ if [[ "$dry_run" != true ]] && is_dotnet_package_installed "$install_root" "$asset_relative_path" "$effective_version"; then + say "$asset_name with version '$effective_version' is already installed." + exit 0 + fi + + return 0 + fi + + # if quality is specified - exit with error - there is no fallback approach + if [ ! -z "$normalized_quality" ]; then + say_err "Failed to locate the latest version in the channel '$normalized_channel' with '$normalized_quality' quality for '$normalized_product', os: '$normalized_os', architecture: '$normalized_architecture'." + say_err "Refer to: https://aka.ms/dotnet-os-lifecycle for information on .NET Core support." + return 1 + fi + say_verbose "Falling back to latest.version file approach." +} + +# THIS FUNCTION MAY EXIT (if the determined version is already installed) +# args: +# feed - $1 +generate_regular_links() { + local feed="$1" + local valid_legacy_download_link=true + + specific_version=$(get_specific_version_from_version "$feed" "$channel" "$normalized_architecture" "$version" "$json_file") || specific_version='0' + + if [[ "$specific_version" == '0' ]]; then + say_verbose "Failed to resolve the specific version number using feed '$feed'" + return + fi + + effective_version="$(get_specific_product_version "$feed" "$specific_version")" + say_verbose "specific_version=$specific_version" + + download_link="$(construct_download_link "$feed" "$channel" "$normalized_architecture" "$specific_version" "$normalized_os")" + say_verbose "Constructed primary named payload URL: $download_link" + + # Add link info to arrays + download_links+=($download_link) + specific_versions+=($specific_version) + effective_versions+=($effective_version) + link_types+=("primary") + + legacy_download_link="$(construct_legacy_download_link "$feed" "$channel" "$normalized_architecture" "$specific_version")" || valid_legacy_download_link=false + + if [ "$valid_legacy_download_link" = true ]; then + say_verbose "Constructed legacy named payload 
URL: $legacy_download_link" + + download_links+=($legacy_download_link) + specific_versions+=($specific_version) + effective_versions+=($effective_version) + link_types+=("legacy") + else + legacy_download_link="" + say_verbose "Cound not construct a legacy_download_link; omitting..." + fi + + # Check if the SDK version is already installed. + if [[ "$dry_run" != true ]] && is_dotnet_package_installed "$install_root" "$asset_relative_path" "$effective_version"; then + say "$asset_name with version '$effective_version' is already installed." + exit 0 + fi +} + +print_dry_run() { + + say "Payload URLs:" + + for link_index in "${!download_links[@]}" + do + say "URL #$link_index - ${link_types[$link_index]}: ${download_links[$link_index]}" + done + + resolved_version=${specific_versions[0]} + repeatable_command="./$script_name --version "\""$resolved_version"\"" --install-dir "\""$install_root"\"" --architecture "\""$normalized_architecture"\"" --os "\""$normalized_os"\""" + + if [ ! -z "$normalized_quality" ]; then + repeatable_command+=" --quality "\""$normalized_quality"\""" + fi + + if [[ "$runtime" == "dotnet" ]]; then + repeatable_command+=" --runtime "\""dotnet"\""" + elif [[ "$runtime" == "aspnetcore" ]]; then + repeatable_command+=" --runtime "\""aspnetcore"\""" + fi + + repeatable_command+="$non_dynamic_parameters" + + if [ -n "$feed_credential" ]; then + repeatable_command+=" --feed-credential "\"""\""" + fi + + say "Repeatable invocation: $repeatable_command" +} + +calculate_vars() { + eval $invocation + + script_name=$(basename "$0") + normalized_architecture="$(get_normalized_architecture_from_architecture "$architecture")" + say_verbose "Normalized architecture: '$normalized_architecture'." + normalized_os="$(get_normalized_os "$user_defined_os")" + say_verbose "Normalized OS: '$normalized_os'." + normalized_quality="$(get_normalized_quality "$quality")" + say_verbose "Normalized quality: '$normalized_quality'." 
+ normalized_channel="$(get_normalized_channel "$channel")" + say_verbose "Normalized channel: '$normalized_channel'." + normalized_product="$(get_normalized_product "$runtime")" + say_verbose "Normalized product: '$normalized_product'." + install_root="$(resolve_installation_path "$install_dir")" + say_verbose "InstallRoot: '$install_root'." + + normalized_architecture="$(get_normalized_architecture_for_specific_sdk_version "$version" "$normalized_channel" "$normalized_architecture")" + + if [[ "$runtime" == "dotnet" ]]; then + asset_relative_path="shared/Microsoft.NETCore.App" + asset_name=".NET Core Runtime" + elif [[ "$runtime" == "aspnetcore" ]]; then + asset_relative_path="shared/Microsoft.AspNetCore.App" + asset_name="ASP.NET Core Runtime" + elif [ -z "$runtime" ]; then + asset_relative_path="sdk" + asset_name=".NET Core SDK" + fi + + get_feeds_to_use +} + +install_dotnet() { + eval $invocation + local download_failed=false + local download_completed=false + local remote_file_size=0 + + mkdir -p "$install_root" + zip_path="${zip_path:-$(mktemp "$temporary_file_template")}" + say_verbose "Archive path: $zip_path" + + for link_index in "${!download_links[@]}" + do + download_link="${download_links[$link_index]}" + specific_version="${specific_versions[$link_index]}" + effective_version="${effective_versions[$link_index]}" + link_type="${link_types[$link_index]}" + + say "Attempting to download using $link_type link $download_link" + + # The download function will set variables $http_code and $download_error_msg in case of failure. + download_failed=false + download "$download_link" "$zip_path" 2>&1 || download_failed=true + + if [ "$download_failed" = true ]; then + case $http_code in + 404) + say "The resource at $link_type link '$download_link' is not available." 
+ ;; + *) + say "Failed to download $link_type link '$download_link': $download_error_msg" + ;; + esac + rm -f "$zip_path" 2>&1 && say_verbose "Temporary archive file $zip_path was removed" + else + download_completed=true + break + fi + done + + if [[ "$download_completed" == false ]]; then + say_err "Could not find \`$asset_name\` with version = $specific_version" + say_err "Refer to: https://aka.ms/dotnet-os-lifecycle for information on .NET Core support" + return 1 + fi + + remote_file_size="$(get_remote_file_size "$download_link")" + + say "Extracting archive from $download_link" + extract_dotnet_package "$zip_path" "$install_root" "$remote_file_size" || return 1 + + # Check if the SDK version is installed; if not, fail the installation. + # if the version contains "RTM" or "servicing"; check if a 'release-type' SDK version is installed. + if [[ $specific_version == *"rtm"* || $specific_version == *"servicing"* ]]; then + IFS='-' + read -ra verArr <<< "$specific_version" + release_version="${verArr[0]}" + unset IFS; + say_verbose "Checking installation: version = $release_version" + if is_dotnet_package_installed "$install_root" "$asset_relative_path" "$release_version"; then + say "Installed version is $effective_version" + return 0 + fi + fi + + # Check if the standard SDK version is installed. + say_verbose "Checking installation: version = $effective_version" + if is_dotnet_package_installed "$install_root" "$asset_relative_path" "$effective_version"; then + say "Installed version is $effective_version" + return 0 + fi + + # Version verification failed. More likely something is wrong either with the downloaded content or with the verification algorithm. + say_err "Failed to verify the version of installed \`$asset_name\`.\nInstallation source: $download_link.\nInstallation location: $install_root.\nReport the bug at https://github.com/dotnet/install-scripts/issues." 
+ say_err "\`$asset_name\` with version = $effective_version failed to install with an error." + return 1 +} + +args=("$@") + +local_version_file_relative_path="/.version" +bin_folder_relative_path="" +temporary_file_template="${TMPDIR:-/tmp}/dotnet.XXXXXXXXX" + +channel="LTS" +version="Latest" +json_file="" +install_dir="" +architecture="" +dry_run=false +no_path=false +no_cdn=false +azure_feed="" +uncached_feed="" +feed_credential="" +verbose=false +runtime="" +runtime_id="" +quality="" +internal=false +override_non_versioned_files=true +non_dynamic_parameters="" +user_defined_os="" + +while [ $# -ne 0 ] +do + name="$1" + case "$name" in + -c|--channel|-[Cc]hannel) + shift + channel="$1" + ;; + -v|--version|-[Vv]ersion) + shift + version="$1" + ;; + -q|--quality|-[Qq]uality) + shift + quality="$1" + ;; + --internal|-[Ii]nternal) + internal=true + non_dynamic_parameters+=" $name" + ;; + -i|--install-dir|-[Ii]nstall[Dd]ir) + shift + install_dir="$1" + ;; + --arch|--architecture|-[Aa]rch|-[Aa]rchitecture) + shift + architecture="$1" + ;; + --os|-[Oo][SS]) + shift + user_defined_os="$1" + ;; + --shared-runtime|-[Ss]hared[Rr]untime) + say_warning "The --shared-runtime flag is obsolete and may be removed in a future version of this script. The recommended usage is to specify '--runtime dotnet'." + if [ -z "$runtime" ]; then + runtime="dotnet" + fi + ;; + --runtime|-[Rr]untime) + shift + runtime="$1" + if [[ "$runtime" != "dotnet" ]] && [[ "$runtime" != "aspnetcore" ]]; then + say_err "Unsupported value for --runtime: '$1'. Valid values are 'dotnet' and 'aspnetcore'." + if [[ "$runtime" == "windowsdesktop" ]]; then + say_err "WindowsDesktop archives are manufactured for Windows platforms only." 
+ fi + exit 1 + fi + ;; + --dry-run|-[Dd]ry[Rr]un) + dry_run=true + ;; + --no-path|-[Nn]o[Pp]ath) + no_path=true + non_dynamic_parameters+=" $name" + ;; + --verbose|-[Vv]erbose) + verbose=true + non_dynamic_parameters+=" $name" + ;; + --no-cdn|-[Nn]o[Cc]dn) + no_cdn=true + non_dynamic_parameters+=" $name" + ;; + --azure-feed|-[Aa]zure[Ff]eed) + shift + azure_feed="$1" + non_dynamic_parameters+=" $name "\""$1"\""" + ;; + --uncached-feed|-[Uu]ncached[Ff]eed) + shift + uncached_feed="$1" + non_dynamic_parameters+=" $name "\""$1"\""" + ;; + --feed-credential|-[Ff]eed[Cc]redential) + shift + feed_credential="$1" + #feed_credential should start with "?", for it to be added to the end of the link. + #adding "?" at the beginning of the feed_credential if needed. + [[ -z "$(echo $feed_credential)" ]] || [[ $feed_credential == \?* ]] || feed_credential="?$feed_credential" + ;; + --runtime-id|-[Rr]untime[Ii]d) + shift + runtime_id="$1" + non_dynamic_parameters+=" $name "\""$1"\""" + say_warning "Use of --runtime-id is obsolete and should be limited to the versions below 2.1. To override architecture, use --architecture option instead. To override OS, use --os option instead." + ;; + --jsonfile|-[Jj][Ss]on[Ff]ile) + shift + json_file="$1" + ;; + --skip-non-versioned-files|-[Ss]kip[Nn]on[Vv]ersioned[Ff]iles) + override_non_versioned_files=false + non_dynamic_parameters+=" $name" + ;; + --keep-zip|-[Kk]eep[Zz]ip) + keep_zip=true + non_dynamic_parameters+=" $name" + ;; + --zip-path|-[Zz]ip[Pp]ath) + shift + zip_path="$1" + ;; + -?|--?|-h|--help|-[Hh]elp) + script_name="dotnet-install.sh" + echo ".NET Tools Installer" + echo "Usage:" + echo " # Install a .NET SDK of a given Quality from a given Channel" + echo " $script_name [-c|--channel ] [-q|--quality ]" + echo " # Install a .NET SDK of a specific public version" + echo " $script_name [-v|--version ]" + echo " $script_name -h|-?|--help" + echo "" + echo "$script_name is a simple command line interface for obtaining dotnet cli." 
+ echo " Note that the intended use of this script is for Continuous Integration (CI) scenarios, where:" + echo " - The SDK needs to be installed without user interaction and without admin rights." + echo " - The SDK installation doesn't need to persist across multiple CI runs." + echo " To set up a development environment or to run apps, use installers rather than this script. Visit https://dotnet.microsoft.com/download to get the installer." + echo "" + echo "Options:" + echo " -c,--channel Download from the channel specified, Defaults to \`$channel\`." + echo " -Channel" + echo " Possible values:" + echo " - STS - the most recent Standard Term Support release" + echo " - LTS - the most recent Long Term Support release" + echo " - 2-part version in a format A.B - represents a specific release" + echo " examples: 2.0; 1.0" + echo " - 3-part version in a format A.B.Cxx - represents a specific SDK release" + echo " examples: 5.0.1xx, 5.0.2xx." + echo " Supported since 5.0 release" + echo " Warning: Value 'Current' is deprecated for the Channel parameter. Use 'STS' instead." + echo " Note: The version parameter overrides the channel parameter when any version other than 'latest' is used." + echo " -v,--version Use specific VERSION, Defaults to \`$version\`." + echo " -Version" + echo " Possible values:" + echo " - latest - the latest build on specific channel" + echo " - 3-part version in a format A.B.C - represents specific version of build" + echo " examples: 2.0.0-preview2-006120; 1.1.0" + echo " -q,--quality Download the latest build of specified quality in the channel." + echo " -Quality" + echo " The possible values are: daily, signed, validated, preview, GA." + echo " Works only in combination with channel. Not applicable for STS and LTS channels and will be ignored if those channels are used." + echo " For SDK use channel in A.B.Cxx format. Using quality for SDK together with channel in A.B format is not supported." + echo " Supported since 5.0 release." 
+ echo " Note: The version parameter overrides the channel parameter when any version other than 'latest' is used, and therefore overrides the quality." + echo " --internal,-Internal Download internal builds. Requires providing credentials via --feed-credential parameter." + echo " --feed-credential Token to access Azure feed. Used as a query string to append to the Azure feed." + echo " -FeedCredential This parameter typically is not specified." + echo " -i,--install-dir Install under specified location (see Install Location below)" + echo " -InstallDir" + echo " --architecture Architecture of dotnet binaries to be installed, Defaults to \`$architecture\`." + echo " --arch,-Architecture,-Arch" + echo " Possible values: x64, arm, arm64, s390x, ppc64le and loongarch64" + echo " --os Specifies operating system to be used when selecting the installer." + echo " Overrides the OS determination approach used by the script. Supported values: osx, linux, linux-musl, freebsd, rhel.6." + echo " In case any other value is provided, the platform will be determined by the script based on machine configuration." + echo " Not supported for legacy links. Use --runtime-id to specify platform for legacy links." + echo " Refer to: https://aka.ms/dotnet-os-lifecycle for more information." + echo " --runtime Installs a shared runtime only, without the SDK." + echo " -Runtime" + echo " Possible values:" + echo " - dotnet - the Microsoft.NETCore.App shared runtime" + echo " - aspnetcore - the Microsoft.AspNetCore.App shared runtime" + echo " --dry-run,-DryRun Do not perform installation. Display download link." + echo " --no-path, -NoPath Do not set PATH for the current process." + echo " --verbose,-Verbose Display diagnostics information." + echo " --azure-feed,-AzureFeed For internal use only." + echo " Allows using a different storage to download SDK archives from." + echo " This parameter is only used if --no-cdn is false." 
+ echo " --uncached-feed,-UncachedFeed For internal use only." + echo " Allows using a different storage to download SDK archives from." + echo " This parameter is only used if --no-cdn is true." + echo " --skip-non-versioned-files Skips non-versioned files if they already exist, such as the dotnet executable." + echo " -SkipNonVersionedFiles" + echo " --no-cdn,-NoCdn Disable downloading from the Azure CDN, and use the uncached feed directly." + echo " --jsonfile Determines the SDK version from a user specified global.json file." + echo " Note: global.json must have a value for 'SDK:Version'" + echo " --keep-zip,-KeepZip If set, downloaded file is kept." + echo " --zip-path, -ZipPath If set, downloaded file is stored at the specified path." + echo " -?,--?,-h,--help,-Help Shows this help message" + echo "" + echo "Install Location:" + echo " Location is chosen in following order:" + echo " - --install-dir option" + echo " - Environmental variable DOTNET_INSTALL_DIR" + echo " - $HOME/.dotnet" + exit 0 + ;; + *) + say_err "Unknown argument \`$name\`" + exit 1 + ;; + esac + + shift +done + +say_verbose "Note that the intended use of this script is for Continuous Integration (CI) scenarios, where:" +say_verbose "- The SDK needs to be installed without user interaction and without admin rights." +say_verbose "- The SDK installation doesn't need to persist across multiple CI runs." +say_verbose "To set up a development environment or to run apps, use installers rather than this script. Visit https://dotnet.microsoft.com/download to get the installer.\n" + +if [ "$internal" = true ] && [ -z "$(echo $feed_credential)" ]; then + message="Provide credentials via --feed-credential parameter." + if [ "$dry_run" = true ]; then + say_warning "$message" + else + say_err "$message" + exit 1 + fi +fi + +check_min_reqs +calculate_vars +# generate_regular_links call below will 'exit' if the determined version is already installed. 
+generate_download_links + +if [[ "$dry_run" = true ]]; then + print_dry_run + exit 0 +fi + +install_dotnet + +bin_path="$(get_absolute_path "$(combine_paths "$install_root" "$bin_folder_relative_path")")" +if [ "$no_path" = false ]; then + say "Adding to current process PATH: \`$bin_path\`. Note: This change will be visible only when sourcing script." + export PATH="$bin_path":"$PATH" +else + say "Binaries of dotnet can be found in $bin_path" +fi + +say "Note that the script does not resolve dependencies during installation." +say "To check the list of dependencies, go to https://learn.microsoft.com/dotnet/core/install, select your operating system and check the \"Dependencies\" section." +say "Installation finished successfully." \ No newline at end of file diff --git a/src/php/devcontainer-feature.json b/src/php/devcontainer-feature.json index 77fcf9812..4db478230 100644 --- a/src/php/devcontainer-feature.json +++ b/src/php/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "php", - "version": "1.1.2", + "version": "1.1.4", "name": "PHP", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/php", "options": { @@ -29,7 +29,14 @@ "bmewburn.vscode-intelephense-client", "xdebug.php-pack", "devsense.phptools-vscode" - ] + ], + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes PHP pre-installed and available on the `PATH`, along with PHP language extensions for PHP development." 
+ } + ] + } } }, "containerEnv": { diff --git a/src/php/install.sh b/src/php/install.sh index 48140e1b3..357395e88 100755 --- a/src/php/install.sh +++ b/src/php/install.sh @@ -121,6 +121,46 @@ find_version_from_git_tags() { echo "${variable_name}=${!variable_name}" } +# Use semver logic to decrement a version number then look for the closest match +find_prev_version_from_git_tags() { + local variable_name=$1 + local current_version=${!variable_name} + local repository=$2 + # Normally a "v" is used before the version number, but support alternate cases + local prefix=${3:-"tags/v"} + # Some repositories use "_" instead of "." for version number part separation, support that + local separator=${4:-"."} + # Some tools release versions that omit the last digit (e.g. go) + local last_part_optional=${5:-"false"} + # Some repositories may have tags that include a suffix (e.g. actions/node-versions) + # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios. 
+ set +e + major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')" + minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')" + breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')" + + if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then + ((major=major-1)) + declare -g ${variable_name}="${major}" + # Look for latest version from previous major release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + # Handle situations like Go's odd version pattern where "0" releases omit the last part + elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then + ((minor=minor-1)) + declare -g ${variable_name}="${major}.${minor}" + # Look for latest version from previous minor release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + else + ((breakfix=breakfix-1)) + if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then + declare -g ${variable_name}="${major}.${minor}" + else + declare -g ${variable_name}="${major}.${minor}.${breakfix}" + fi + fi + set -e +} + # Install PHP Composer addcomposer() { "${PHP_SRC}" -r "copy('https://getcomposer.org/installer', 'composer-setup.php');" @@ -130,8 +170,7 @@ addcomposer() { "${PHP_SRC}" -r "unlink('composer-setup.php');" } -install_php() { - PHP_VERSION="$1" +init_php_install() { PHP_INSTALL_DIR="${PHP_DIR}/${PHP_VERSION}" if [ -d "${PHP_INSTALL_DIR}" ]; then echo "(!) PHP version ${PHP_VERSION} already exists." 
@@ -142,7 +181,6 @@ install_php() { groupadd -r php fi usermod -a -G php "${USERNAME}" - PHP_URL="https://www.php.net/distributions/php-${PHP_VERSION}.tar.gz" PHP_INI_DIR="${PHP_INSTALL_DIR}/ini" @@ -155,7 +193,26 @@ install_php() { PHP_SRC_DIR="/usr/src/php" mkdir -p $PHP_SRC_DIR cd $PHP_SRC_DIR - wget -O php.tar.xz "$PHP_URL" +} + +install_previous_version() { + PHP_VERSION=$1 + if [[ "$ORIGINAL_PHP_VERSION" == "latest" ]]; then + find_prev_version_from_git_tags PHP_VERSION https://github.com/php/php-src "tags/php-" + echo -e "\nAttempting to install previous version v${PHP_VERSION}" + init_php_install + wget -O php.tar.xz "$PHP_URL" + else + echo -e "\nFailed to install v$PHP_VERSION" + fi +} + +install_php() { + PHP_VERSION="$1" + + init_php_install + + wget -O php.tar.xz "$PHP_URL" || install_previous_version "$PHP_VERSION" tar -xf $PHP_SRC_DIR/php.tar.xz -C "$PHP_SRC_DIR" --strip-components=1 cd $PHP_SRC_DIR; @@ -195,7 +252,7 @@ install_php() { if [ "${PHP_VERSION}" != "none" ]; then # Persistent / runtime dependencies - RUNTIME_DEPS="wget ca-certificates git build-essential xz-utils" + RUNTIME_DEPS="wget ca-certificates git build-essential xz-utils curl" # PHP dependencies PHP_DEPS="libssl-dev libcurl4-openssl-dev libedit-dev libsqlite3-dev libxml2-dev zlib1g-dev libsodium-dev libonig-dev" @@ -214,6 +271,8 @@ if [ "${PHP_VERSION}" != "none" ]; then # Install dependencies check_packages $RUNTIME_DEPS $PHP_DEPS $PHPIZE_DEPS + # storing value of PHP_VERSION before it changes + ORIGINAL_PHP_VERSION=$PHP_VERSION find_version_from_git_tags PHP_VERSION https://github.com/php/php-src "tags/php-" install_php "${PHP_VERSION}" diff --git a/src/powershell/README.md b/src/powershell/README.md index ccc62f7fe..31199a4e1 100644 --- a/src/powershell/README.md +++ b/src/powershell/README.md @@ -16,7 +16,14 @@ Installs PowerShell along with needed dependencies. 
Useful for base Dockerfiles | Options Id | Description | Type | Default Value | |-----|-----|-----|-----| | version | Select or enter a version of PowerShell. | string | latest | -| modules | Optional comma separated list of PowerShell modules to install. | string | - | +| modules | Optional comma separated list of PowerShell modules to install. If you need to install a specific version of a module, use '==' to specify the version (e.g. 'az.resources==2.5.0') | string | - | +| powershellProfileURL | Optional (publicly accessible) URL to download PowerShell profile. | string | - | + +## Customizations + +### VS Code Extensions + +- `ms-vscode.powershell` diff --git a/src/powershell/devcontainer-feature.json b/src/powershell/devcontainer-feature.json index dce05755e..f4867cc9e 100644 --- a/src/powershell/devcontainer-feature.json +++ b/src/powershell/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "powershell", - "version": "1.1.0", + "version": "1.5.1", "name": "PowerShell", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/powershell", "description": "Installs PowerShell along with needed dependencies. Useful for base Dockerfiles that often are missing required install dependencies like gpg.", @@ -10,7 +10,9 @@ "proposals": [ "latest", "none", - "7.1" + "7.4", + "7.3", + "7.2" ], "default": "latest", "description": "Select or enter a version of PowerShell." @@ -18,9 +20,28 @@ "modules": { "type": "string", "default": "", - "description": "Optional comma separated list of PowerShell modules to install." - } + "description": "Optional comma separated list of PowerShell modules to install. If you need to install a specific version of a module, use '==' to specify the version (e.g. 'az.resources==2.5.0')" }, + "powershellProfileURL": { + "type": "string", + "default": "", + "description": "Optional (publicly accessible) URL to download PowerShell profile." 
+ } + }, + "customizations": { + "vscode": { + "extensions": [ + "ms-vscode.powershell" + ] + }, + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes PowerShell along with needed dependencies pre-installed and available on the `PATH`, along with the PowerShell extension." + } + ] + } + }, "installsAfter": [ "ghcr.io/devcontainers/features/common-utils" ] diff --git a/src/powershell/install.sh b/src/powershell/install.sh index b220c3acf..3da7a231b 100755 --- a/src/powershell/install.sh +++ b/src/powershell/install.sh @@ -13,20 +13,93 @@ set -e rm -rf /var/lib/apt/lists/* POWERSHELL_VERSION=${VERSION:-"latest"} -POWERSHELL_MODULES="${MODULES}" +POWERSHELL_MODULES="${MODULES:-""}" +POWERSHELL_PROFILE_URL="${POWERSHELLPROFILEURL}" MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc" -POWERSHELL_ARCHIVE_ARCHITECTURES="amd64" -POWERSHELL_ARCHIVE_VERSION_CODENAMES="stretch buster bionic focal bullseye jammy" +#MICROSOFT_GPG_KEYS_URI=$(curl https://packages.microsoft.com/keys/microsoft.asc -o /usr/share/keyrings/microsoft-archive-keyring.gpg) +POWERSHELL_ARCHIVE_ARCHITECTURES_UBUNTU="amd64" +POWERSHELL_ARCHIVE_ARCHITECTURES_ALMALINUX="x86_64" +POWERSHELL_ARCHIVE_VERSION_CODENAMES="stretch buster bionic focal bullseye jammy bookworm noble" + +#These key servers are used to verify the authenticity of packages and repositories. +#keyservers for ubuntu and almalinux are different so we need to specify both GPG_KEY_SERVERS="keyserver hkp://keyserver.ubuntu.com +keyserver hkp://keyserver.ubuntu.com:80 +keyserver hkps://keys.openpgp.org +keyserver hkp://keyserver.pgp.com +keyserver hkp://keyserver.fedoraproject.org keyserver hkps://keys.openpgp.org -keyserver hkp://keyserver.pgp.com" +keyserver hkp://pgp.mit.edu +keyserver hkp://keyserver.redhat.com" + if [ "$(id -u)" -ne 0 ]; then echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' 
exit 1 fi +# Clean up package manager cache +clean_cache() { + if [ -d "/var/cache/apt" ]; then + apt-get clean + fi + if [ -d "/var/cache/dnf" ]; then + rm -rf /var/cache/dnf/* + fi +} +# Install dependencies for RHEL/CentOS/AlmaLinux (DNF-based systems) +install_using_dnf() { + dnf remove -y curl-minimal + dnf install -y curl gnupg2 ca-certificates dnf-plugins-core + dnf clean all + dnf makecache + curl --version +} + +# Install PowerShell on RHEL/CentOS/AlmaLinux-based systems (DNF) +install_powershell_dnf() { + # Install wget, if not already installed + dnf install -y wget + + # Download Microsoft GPG key + curl https://packages.microsoft.com/keys/microsoft.asc -o /usr/share/keyrings/microsoft-archive-keyring.gpg + ls -l /usr/share/keyrings/microsoft-archive-keyring.gpg + + # Install necessary dependencies + dnf install -y krb5-libs libicu openssl-libs zlib + + # Add Microsoft PowerShell repository + curl "https://packages.microsoft.com/config/rhel/9.0/prod.repo" > /etc/yum.repos.d/microsoft.repo + + # Install PowerShell + dnf install --assumeyes powershell +} + + +# Detect the package manager and OS +detect_package_manager() { + if [ -f /etc/os-release ]; then + . /etc/os-release + if [[ "$ID" == "ubuntu" || "$ID" == "debian" ]]; then + echo "Detected Debian/Ubuntu-based system" + install_using_apt + install_pwsh + elif [[ "$ID" == "centos" || "$ID" == "rhel" || "$ID" == "almalinux" ]]; then + echo "Detected RHEL/CentOS/AlmaLinux-based system" + install_using_dnf + install_powershell_dnf + install_pwsh + else + echo "Unsupported Linux distribution: $ID" + exit 1 + fi + else + echo "Could not detect OS" + exit 1 + fi +} + # Figure out correct version of a three part version number is not passed find_version_from_git_tags() { local variable_name=$1 @@ -70,19 +143,43 @@ apt_get_update() } # Checks if packages are installed and installs them if not -check_packages() { - if ! 
dpkg -s "$@" > /dev/null 2>&1; then - apt_get_update - apt-get -y install --no-install-recommends "$@" - fi + check_packages() { + if command -v dpkg > /dev/null 2>&1; then + # If dpkg exists, assume APT-based system (Debian/Ubuntu) + for package in "$@"; do + if ! dpkg -s "$package" > /dev/null 2>&1; then + echo "Package $package not installed. Installing using apt-get..." + apt-get update + apt-get install -y --no-install-recommends "$package" + else + echo "Package $package is already installed (APT)." + fi + done + elif command -v dnf > /dev/null 2>&1; then + for package in "$@"; do + if ! dnf list installed "$package" > /dev/null 2>&1; then + echo "Package $package not installed. Installing using dnf..." + dnf install -y "$package" + else + echo "Package $package is already installed (DNF)." + fi + done +else + echo "Unsupported package manager. Neither APT nor DNF found." + return 1 +fi + + } install_using_apt() { # Install dependencies check_packages apt-transport-https curl ca-certificates gnupg2 dirmngr # Import key safely (new 'signed-by' method rather than deprecated apt-key approach) and install + curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list + # Update lists apt-get update -yq @@ -104,23 +201,118 @@ install_using_apt() { apt-get install -yq powershell${version_suffix} || return 1 } +# Use semver logic to decrement a version number then look for the closest match +find_prev_version_from_git_tags() { + local variable_name=$1 + local current_version=${!variable_name} + local repository=$2 + # Normally a "v" is used before the version number, but support alternate cases + local prefix=${3:-"tags/v"} + # Some repositories use "_" instead of "." 
for version number part separation, support that + local separator=${4:-"."} + # Some tools release versions that omit the last digit (e.g. go) + local last_part_optional=${5:-"false"} + # Some repositories may have tags that include a suffix (e.g. actions/node-versions) + local version_suffix_regex=$6 + # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios. + set +e + major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')" + minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')" + breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')" + + if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then + ((major=major-1)) + declare -g ${variable_name}="${major}" + # Look for latest version from previous major release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + # Handle situations like Go's odd version pattern where "0" releases omit the last part + elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then + ((minor=minor-1)) + declare -g ${variable_name}="${major}.${minor}" + # Look for latest version from previous minor release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + else + ((breakfix=breakfix-1)) + if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then + declare -g ${variable_name}="${major}.${minor}" + else + declare -g ${variable_name}="${major}.${minor}.${breakfix}" + fi + fi + set -e +} + +# Function to fetch the version released prior to the latest version +get_previous_version() { + local url=$1 + local repo_url=$2 + local variable_name=$3 + prev_version=${!variable_name} + + output=$(curl -s "$repo_url"); + check_packages jq + message=$(echo "$output" | jq -r '.message') + + if [[ $message == "API rate limit exceeded"* ]]; then + echo -e 
"\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}" + echo -e "\nAttempting to find latest version using GitHub tags." + find_prev_version_from_git_tags prev_version "$url" "tags/v" + declare -g ${variable_name}="${prev_version}" + else + echo -e "\nAttempting to find latest version using GitHub Api." + version=$(echo "$output" | jq -r '.tag_name') + declare -g ${variable_name}="${version#v}" + fi + echo "${variable_name}=${!variable_name}" +} + +get_github_api_repo_url() { + local url=$1 + echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases/latest" +} + + +install_prev_pwsh() { + pwsh_url=$1 + repo_url=$(get_github_api_repo_url $pwsh_url) + echo -e "\n(!) Failed to fetch the latest artifacts for powershell v${POWERSHELL_VERSION}..." + get_previous_version $pwsh_url $repo_url POWERSHELL_VERSION + echo -e "\nAttempting to install v${POWERSHELL_VERSION}" + install_pwsh "${POWERSHELL_VERSION}" +} + +install_pwsh() { + POWERSHELL_VERSION=$1 + powershell_filename="powershell-${POWERSHELL_VERSION}-linux-${architecture}.tar.gz" + powershell_target_path="/opt/microsoft/powershell/$(echo ${POWERSHELL_VERSION} | grep -oE '[^\.]+' | head -n 1)" + mkdir -p /tmp/pwsh "${powershell_target_path}" + cd /tmp/pwsh + curl -sSL -o "${powershell_filename}" "https://github.com/PowerShell/PowerShell/releases/download/v${POWERSHELL_VERSION}/${powershell_filename}" +} + install_using_github() { # Fall back on direct download if no apt package exists in microsoft pool check_packages curl ca-certificates gnupg2 dirmngr libc6 libgcc1 libgssapi-krb5-2 libstdc++6 libunwind8 libuuid1 zlib1g libicu[0-9][0-9] if ! 
type git > /dev/null 2>&1; then check_packages git fi - if [ "${architecture}" = "amd64" ]; then + + if [ "${architecture}" = "amd64" ]; then architecture="x64" fi - find_version_from_git_tags POWERSHELL_VERSION https://github.com/PowerShell/PowerShell - powershell_filename="powershell-${POWERSHELL_VERSION}-linux-${architecture}.tar.gz" - powershell_target_path="/opt/microsoft/powershell/$(echo ${POWERSHELL_VERSION} | grep -oE '[^\.]+' | head -n 1)" - mkdir -p /tmp/pwsh "${powershell_target_path}" - cd /tmp/pwsh - curl -sSL -o "${powershell_filename}" "https://github.com/PowerShell/PowerShell/releases/download/v${POWERSHELL_VERSION}/${powershell_filename}" - # Ugly - but only way to get sha256 is to parse release HTML. Remove newlines and tags, then look for filename followed by 64 hex characters. - curl -sSL -o "release.html" "https://github.com/PowerShell/PowerShell/releases/tag/v${POWERSHELL_VERSION}" + pwsh_url="https://github.com/PowerShell/PowerShell" + find_version_from_git_tags POWERSHELL_VERSION $pwsh_url + install_pwsh "${POWERSHELL_VERSION}" + if grep -q "Not Found" "${powershell_filename}"; then + install_prev_pwsh $pwsh_url + fi + + # download the latest version of powershell and extract the file to the powershell directory + wget https://github.com/PowerShell/PowerShell/releases/download/v${POWERSHELL_VERSION}/${powershell_filename} + mkdir ~/powershell + tar -xvf powershell-${POWERSHELL_VERSION}-linux-x64.tar.gz -C ~/powershell + + powershell_archive_sha256="$(cat release.html | tr '\n' ' ' | sed 's|<[^>]*>||g' | grep -oP "${powershell_filename}\s+\K[0-9a-fA-F]{64}" || echo '')" if [ -z "${powershell_archive_sha256}" ]; then echo "(!) WARNING: Failed to retrieve SHA256 for archive. Skipping validaiton." 
@@ -129,39 +321,73 @@ install_using_github() { echo "${powershell_archive_sha256} *${powershell_filename}" | sha256sum -c - fi tar xf "${powershell_filename}" -C "${powershell_target_path}" - ln -s "${powershell_target_path}/pwsh" /usr/local/bin/pwsh + chmod 755 "${powershell_target_path}/pwsh" + ln -sf "${powershell_target_path}/pwsh" /usr/bin/pwsh + add-shell "/usr/bin/pwsh" + cd /tmp rm -rf /tmp/pwsh } -export DEBIAN_FRONTEND=noninteractive +if ! type pwsh >/dev/null 2>&1; then + export DEBIAN_FRONTEND=noninteractive + + # Source /etc/os-release to get OS info + . /etc/os-release + architecture="$(uname -m)" + if [[ "$ID" == "ubuntu" || "$ID" == "debian" ]]; then + POWERSHELL_ARCHIVE_ARCHITECTURES="${POWERSHELL_ARCHIVE_ARCHITECTURES_UBUNTU}" + elif [[ "$ID" == "centos" || "$ID" == "rhel" || "$ID" == "almalinux" ]]; then + POWERSHELL_ARCHIVE_ARCHITECTURES="${POWERSHELL_ARCHIVE_ARCHITECTURES_ALMALINUX}" + fi -# Source /etc/os-release to get OS info -. /etc/os-release -architecture="$(dpkg --print-architecture)" + if [[ "${POWERSHELL_ARCHIVE_ARCHITECTURES}" = *"${POWERSHELL_ARCHIVE_ARCHITECTURES_UBUNTU}"* ]] && [[ "${POWERSHELL_ARCHIVE_VERSION_CODENAMES}" = *"${VERSION_CODENAME}"* ]]; then + install_using_apt || use_github="true" + elif [[ "${POWERSHELL_ARCHIVE_ARCHITECTURES}" = *"${POWERSHELL_ARCHIVE_ARCHITECTURES_ALMALINUX}"* ]]; then + install_using_dnf && install_powershell_dnf || use_github="true" + + else + use_github="true" + fi -if [[ "${POWERSHELL_ARCHIVE_ARCHITECTURES}" = *"${architecture}"* ]] && [[ "${POWERSHELL_ARCHIVE_VERSION_CODENAMES}" = *"${VERSION_CODENAME}"* ]]; then - install_using_apt || use_github="true" + if [ "${use_github}" = "true" ]; then + echo "Attempting install from GitHub release..." + install_using_github + fi else - use_github="true" -fi - -if [ "${use_github}" = "true" ]; then - echo "Attempting install from GitHub release..." - install_using_github + echo "PowerShell is already installed." 
fi -# If PowerShell modules are requested, loop through and install +# If PowerShell modules are requested, loop through and install if [ ${#POWERSHELL_MODULES[@]} -gt 0 ]; then echo "Installing PowerShell Modules: ${POWERSHELL_MODULES}" modules=(`echo ${POWERSHELL_MODULES} | tr ',' ' '`) for i in "${modules[@]}" do - echo "Installing ${i}" - pwsh -Command "Install-Module -Name ${i} -AllowClobber -Force -Scope AllUsers" || continue + module_parts=(`echo ${i} | tr '==' ' '`) + module_name="${module_parts[0]}" + args="-Name ${module_name} -AllowClobber -Force -Scope AllUsers" + if [ "${#module_parts[@]}" -eq 2 ]; then + module_version="${module_parts[1]}" + echo "Installing ${module_name} v${module_version}" + args+=" -RequiredVersion ${module_version}" + else + echo "Installing latest version for ${i} module" + fi + + pwsh -Command "Install-Module $args" || continue done fi + +# If URL for powershell profile is provided, download it to '/opt/microsoft/powershell/7/profile.ps1' +if [ -n "$POWERSHELL_PROFILE_URL" ]; then + echo "Downloading PowerShell Profile from: $POWERSHELL_PROFILE_URL" + # Get profile path from currently installed pwsh + profilePath=$(pwsh -noni -c '$PROFILE.AllUsersAllHosts') + sudo -E curl -sSL -o "$profilePath" "$POWERSHELL_PROFILE_URL" +fi + # Clean up rm -rf /var/lib/apt/lists/* -echo "Done!" +echo "Done!" \ No newline at end of file diff --git a/src/python/NOTES.md b/src/python/NOTES.md index 19fe92f31..79a308cf5 100644 --- a/src/python/NOTES.md +++ b/src/python/NOTES.md @@ -2,6 +2,6 @@ ## OS Support -This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. +This Feature should work on recent versions of Debian/Ubuntu, RedHat Enterprise Linux, Fedora, Alma, and RockyLinux distributions with the apt, yum, dnf, or microdnf package manager installed. `bash` is required to execute the `install.sh` script. 
diff --git a/src/python/README.md b/src/python/README.md index 2dd092ae7..90d79aee8 100644 --- a/src/python/README.md +++ b/src/python/README.md @@ -16,8 +16,10 @@ Installs the provided version of Python, as well as PIPX, and other common Pytho | Options Id | Description | Type | Default Value | |-----|-----|-----|-----| | version | Select a Python version to install. | string | os-provided | -| installTools | Install common Python tools like pylint | boolean | true | +| installTools | Flag indicating whether or not to install the tools specified via the 'toolsToInstall' option. Default is 'true'. | boolean | true | +| toolsToInstall | Comma-separated list of tools to install when 'installTools' is true. Defaults to a set of common Python tools like pylint. | string | flake8,autopep8,black,yapf,mypy,pydocstyle,pycodestyle,bandit,pipenv,virtualenv,pytest,pylint | | optimize | Optimize Python for performance when compiled (slow) | boolean | false | +| enableShared | Enable building a shared Python library | boolean | false | | installPath | The path where python will be installed. | string | /usr/local/python | | installJupyterlab | Install JupyterLab, a web-based interactive development environment for notebooks | boolean | false | | configureJupyterlabAllowOrigin | Configure JupyterLab to accept HTTP requests from the specified origin | string | - | @@ -29,12 +31,13 @@ Installs the provided version of Python, as well as PIPX, and other common Pytho - `ms-python.python` - `ms-python.vscode-pylance` +- `ms-python.autopep8` ## OS Support -This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. +This Feature should work on recent versions of Debian/Ubuntu, RedHat Enterprise Linux, Fedora, Alma, and RockyLinux distributions with the apt, yum, dnf, or microdnf package manager installed. `bash` is required to execute the `install.sh` script. 
diff --git a/src/python/devcontainer-feature.json b/src/python/devcontainer-feature.json index b4aa21e25..635b233cf 100644 --- a/src/python/devcontainer-feature.json +++ b/src/python/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "python", - "version": "1.1.0", + "version": "1.7.1", "name": "Python", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/python", "description": "Installs the provided version of Python, as well as PIPX, and other common Python utilities. JupyterLab is conditionally installed with the python feature. Note: May require source code compilation.", @@ -11,6 +11,7 @@ "latest", "os-provided", "none", + "3.12", "3.11", "3.10", "3.9", @@ -24,13 +25,23 @@ "installTools": { "type": "boolean", "default": true, - "description": "Install common Python tools like pylint" + "description": "Flag indicating whether or not to install the tools specified via the 'toolsToInstall' option. Default is 'true'." + }, + "toolsToInstall": { + "type": "string", + "default": "flake8,autopep8,black,yapf,mypy,pydocstyle,pycodestyle,bandit,pipenv,virtualenv,pytest,pylint", + "description": "Comma-separated list of tools to install when 'installTools' is true. Defaults to a set of common Python tools like pylint." 
}, "optimize": { "type": "boolean", "default": false, "description": "Optimize Python for performance when compiled (slow)" }, + "enableShared": { + "type": "boolean", + "default": false, + "description": "Enable building a shared Python library" + }, "installPath": { "type": "string", "default": "/usr/local/python", @@ -56,29 +67,30 @@ "PYTHON_PATH": "/usr/local/python/current", "PIPX_HOME": "/usr/local/py-utils", "PIPX_BIN_DIR": "/usr/local/py-utils/bin", - "PATH": "/usr/local/python/current/bin:/usr/local/py-utils/bin:${PATH}" + "PATH": "/usr/local/python/current/bin:/usr/local/py-utils/bin:/usr/local/jupyter:${PATH}" }, "customizations": { "vscode": { "extensions": [ "ms-python.python", - "ms-python.vscode-pylance" + "ms-python.vscode-pylance", + "ms-python.autopep8" ], "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes `python3` and `pip3` pre-installed and available on the `PATH`, along with the Python language extensions for Python development." 
+ } + ], "python.defaultInterpreterPath": "/usr/local/python/current/bin/python", - "python.formatting.autopep8Path": "/usr/local/py-utils/bin/autopep8", - "python.formatting.blackPath": "/usr/local/py-utils/bin/black", - "python.linting.flake8Path": "/usr/local/py-utils/bin/flake8", - "python.linting.flake8Enabled": false, - "python.linting.mypyPath": "/usr/local/py-utils/bin/mypy", - "python.linting.mypyEnabled": false, - "python.linting.pylintPath": "/usr/local/py-utils/bin/pylint", - "python.linting.pylintEnabled": false + "[python]": { + "editor.defaultFormatter": "ms-python.autopep8" + } } } }, "installsAfter": [ - "ghcr.io/devcontainers/features/common-utils", - "ghcr.io/devcontainers/features/oryx" + "ghcr.io/devcontainers/features/common-utils", + "ghcr.io/devcontainers/features/oryx" ] -} +} \ No newline at end of file diff --git a/src/python/install.sh b/src/python/install.sh index 0e2b1253c..fec8937a0 100755 --- a/src/python/install.sh +++ b/src/python/install.sh @@ -9,7 +9,9 @@ PYTHON_VERSION="${VERSION:-"latest"}" # 'system' or 'os-provided' checks the base image first, else installs 'latest' INSTALL_PYTHON_TOOLS="${INSTALLTOOLS:-"true"}" +SKIP_VULNERABILITY_PATCHING="${SKIPVULNERABILITYPATCHING:-"false"}" OPTIMIZE_BUILD_FROM_SOURCE="${OPTIMIZE:-"false"}" +ENABLE_SHARED_FROM_SOURCE="${ENABLESHARED:-"false"}" PYTHON_INSTALL_PATH="${INSTALLPATH:-"/usr/local/python"}" OVERRIDE_DEFAULT_VERSION="${OVERRIDEDEFAULTVERSION:-"true"}" @@ -26,62 +28,148 @@ CONFIGURE_JUPYTERLAB_ALLOW_ORIGIN="${CONFIGUREJUPYTERLABALLOWORIGIN:-""}" # alongside PYTHON_VERSION, but not set as default. 
ADDITIONAL_VERSIONS="${ADDITIONALVERSIONS:-""}" -DEFAULT_UTILS=("pylint" "flake8" "autopep8" "black" "yapf" "mypy" "pydocstyle" "pycodestyle" "bandit" "pipenv" "virtualenv" "pytest") -PYTHON_SOURCE_GPG_KEYS="64E628F8D684696D B26995E310250568 2D347EA6AA65421D FB9921286F5E1540 3A5CA953F73C700D 04C367C218ADD4FF 0EDDC5F26A45C816 6AF053F07D9DC8D2 C9BE28DEE6DF025C 126EB563A74B06BF D9866941EA5BBD71 ED9D77D5" -GPG_KEY_SERVERS="keyserver hkp://keyserver.ubuntu.com -keyserver hkps://keys.openpgp.org -keyserver hkp://keyserver.pgp.com" +# Comma-separated list of additional tools to be installed via pipx. +IFS="," read -r -a DEFAULT_UTILS <<< "${TOOLSTOINSTALL:-flake8,autopep8,black,yapf,mypy,pydocstyle,pycodestyle,bandit,pipenv,virtualenv,pytest}" + +PYTHON_SOURCE_GPG_KEYS="64E628F8D684696D B26995E310250568 2D347EA6AA65421D FB9921286F5E1540 3A5CA953F73C700D 04C367C218ADD4FF 0EDDC5F26A45C816 6AF053F07D9DC8D2 C9BE28DEE6DF025C 126EB563A74B06BF D9866941EA5BBD71 ED9D77D5 A821E680E5FA6305" KEYSERVER_PROXY="${HTTPPROXY:-"${HTTP_PROXY:-""}"}" set -e -# Clean up -rm -rf /var/lib/apt/lists/* - if [ "$(id -u)" -ne 0 ]; then echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' exit 1 fi -# Ensure that login shells get the correct path if the user updated the PATH using ENV. -rm -f /etc/profile.d/00-restore-env.sh -echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh -chmod +x /etc/profile.d/00-restore-env.sh +# Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME +. /etc/os-release +# Get an adjusted ID independent of distro variants +MAJOR_VERSION_ID=$(echo ${VERSION_ID} | cut -d . 
-f 1) +if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then + ADJUSTED_ID="debian" +elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then + ADJUSTED_ID="rhel" + if [[ "${ID}" = "rhel" ]] || [[ "${ID}" = *"alma"* ]] || [[ "${ID}" = *"rocky"* ]]; then + VERSION_CODENAME="rhel${MAJOR_VERSION_ID}" + else + VERSION_CODENAME="${ID}${MAJOR_VERSION_ID}" + fi +else + echo "Linux distro ${ID} not supported." + exit 1 +fi -# Determine the appropriate non-root user -if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then - USERNAME="" - POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)") - for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do - if id -u ${CURRENT_USER} > /dev/null 2>&1; then - USERNAME=${CURRENT_USER} - break - fi - done - if [ "${USERNAME}" = "" ]; then - USERNAME=root +if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then + # As of 1 July 2024, mirrorlist.centos.org no longer exists. + # Update the repo files to reference vault.centos.org. + sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo + sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo + sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo +fi + +# To find some devel packages, some rhel need to enable specific extra repos, but not on RedHat ubi images... +INSTALL_CMD_ADDL_REPO="" +if [ ${ADJUSTED_ID} = "rhel" ] && [ ${ID} != "rhel" ]; then + if [ ${MAJOR_VERSION_ID} = "8" ]; then + INSTALL_CMD_ADDL_REPOS="--enablerepo powertools" + elif [ ${MAJOR_VERSION_ID} = "9" ]; then + INSTALL_CMD_ADDL_REPOS="--enablerepo crb" fi -elif [ "${USERNAME}" = "none" ] || ! 
id -u ${USERNAME} > /dev/null 2>&1; then - USERNAME=root fi +# Setup INSTALL_CMD & PKG_MGR_CMD +if type apt-get > /dev/null 2>&1; then + PKG_MGR_CMD=apt-get + INSTALL_CMD="${PKG_MGR_CMD} -y install --no-install-recommends" +elif type microdnf > /dev/null 2>&1; then + PKG_MGR_CMD=microdnf + INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0" +elif type dnf > /dev/null 2>&1; then + PKG_MGR_CMD=dnf + INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --refresh --best --nodocs --noplugins --setopt=install_weak_deps=0" +else + PKG_MGR_CMD=yum + INSTALL_CMD="${PKG_MGR_CMD} ${INSTALL_CMD_ADDL_REPOS} -y install --noplugins --setopt=install_weak_deps=0" +fi + +# Clean up +clean_up() { + case ${ADJUSTED_ID} in + debian) + rm -rf /var/lib/apt/lists/* + ;; + rhel) + rm -rf /var/cache/dnf/* /var/cache/yum/* + rm -rf /tmp/yum.log + rm -rf ${GPG_INSTALL_PATH} + ;; + esac +} +clean_up + + + updaterc() { + local _bashrc + local _zshrc if [ "${UPDATE_RC}" = "true" ]; then - echo "Updating /etc/bash.bashrc and /etc/zsh/zshrc..." - if [[ "$(cat /etc/bash.bashrc)" != *"$1"* ]]; then - echo -e "$1" >> /etc/bash.bashrc + case $ADJUSTED_ID in + debian) echo "Updating /etc/bash.bashrc and /etc/zsh/zshrc..." + _bashrc=/etc/bash.bashrc + _zshrc=/etc/zsh/zshrc + ;; + rhel) echo "Updating /etc/bashrc and /etc/zshrc..." 
+ _bashrc=/etc/bashrc + _zshrc=/etc/zshrc + ;; + esac + if [[ "$(cat ${_bashrc})" != *"$1"* ]]; then + echo -e "$1" >> ${_bashrc} fi - if [ -f "/etc/zsh/zshrc" ] && [[ "$(cat /etc/zsh/zshrc)" != *"$1"* ]]; then - echo -e "$1" >> /etc/zsh/zshrc + if [ -f "${_zshrc}" ] && [[ "$(cat ${_zshrc})" != *"$1"* ]]; then + echo -e "$1" >> ${_zshrc} fi fi } -# Import the specified key in a variable name passed in as +# Get the list of GPG key servers that are reachable +get_gpg_key_servers() { + declare -A keyservers_curl_map=( + ["hkp://keyserver.ubuntu.com"]="http://keyserver.ubuntu.com:11371" + ["hkp://keyserver.ubuntu.com:80"]="http://keyserver.ubuntu.com" + ["hkps://keys.openpgp.org"]="https://keys.openpgp.org" + ["hkp://keyserver.pgp.com"]="http://keyserver.pgp.com:11371" + ) + + local curl_args="" + local keyserver_reachable=false # Flag to indicate if any keyserver is reachable + + if [ ! -z "${KEYSERVER_PROXY}" ]; then + curl_args="--proxy ${KEYSERVER_PROXY}" + fi + + for keyserver in "${!keyservers_curl_map[@]}"; do + local keyserver_curl_url="${keyservers_curl_map[${keyserver}]}" + if curl -s ${curl_args} --max-time 5 ${keyserver_curl_url} > /dev/null; then + echo "keyserver ${keyserver}" + keyserver_reachable=true + else + echo "(*) Keyserver ${keyserver} is not reachable." >&2 + fi + done + + if ! $keyserver_reachable; then + echo "(!) No keyserver is reachable." >&2 + exit 1 + fi +} + +# Import the specified key in a variable name passed in as receive_gpg_keys() { local keys=${!1} local keyring_args="" + local gpg_cmd="gpg" if [ ! -z "$2" ]; then mkdir -p "$(dirname \"$2\")" keyring_args="--no-default-keyring --keyring $2" @@ -90,21 +178,26 @@ receive_gpg_keys() { keyring_args="${keyring_args} --keyserver-options http-proxy=${KEYSERVER_PROXY}" fi + # Install curl + if ! 
type curl > /dev/null 2>&1; then + check_packages curl + fi + # Use a temporary location for gpg keys to avoid polluting image export GNUPGHOME="/tmp/tmp-gnupg" mkdir -p ${GNUPGHOME} chmod 700 ${GNUPGHOME} - echo -e "disable-ipv6\n${GPG_KEY_SERVERS}" > ${GNUPGHOME}/dirmngr.conf + echo -e "disable-ipv6\n$(get_gpg_key_servers)" > ${GNUPGHOME}/dirmngr.conf # GPG key download sometimes fails for some reason and retrying fixes it. local retry_count=0 local gpg_ok="false" set +e - until [ "${gpg_ok}" = "true" ] || [ "${retry_count}" -eq "5" ]; + until [ "${gpg_ok}" = "true" ] || [ "${retry_count}" -eq "5" ]; do echo "(*) Downloading GPG key..." ( echo "${keys}" | xargs -n 1 gpg -q ${keyring_args} --recv-keys) 2>&1 && gpg_ok="true" if [ "${gpg_ok}" != "true" ]; then - echo "(*) Failed getting key, retring in 10s..." + echo "(*) Failed getting key, retrying in 10s..." (( retry_count++ )) sleep 10s fi @@ -115,6 +208,56 @@ receive_gpg_keys() { exit 1 fi } +# RHEL7/CentOS7 has an older gpg that does not have dirmngr +# Iterate through keyservers until we have all the keys downloaded +receive_gpg_keys_centos7() { + local keys=${!1} + local keyring_args="" + local gpg_cmd="gpg" + if [ ! -z "$2" ]; then + mkdir -p "$(dirname \"$2\")" + keyring_args="--no-default-keyring --keyring $2" + fi + if [ ! -z "${KEYSERVER_PROXY}" ]; then + keyring_args="${keyring_args} --keyserver-options http-proxy=${KEYSERVER_PROXY}" + fi + + # Install curl + if ! type curl > /dev/null 2>&1; then + check_packages curl + fi + + # Use a temporary location for gpg keys to avoid polluting image + export GNUPGHOME="/tmp/tmp-gnupg" + mkdir -p ${GNUPGHOME} + chmod 700 ${GNUPGHOME} + # GPG key download sometimes fails for some reason and retrying fixes it. + local retry_count=0 + local gpg_ok="false" + num_keys=$(echo ${keys} | wc -w) + set +e + echo "(*) Downloading GPG keys..." 
+ until [ "${gpg_ok}" = "true" ] || [ "${retry_count}" -eq "5" ]; do + for keyserver in $(echo "$(get_gpg_key_servers)" | sed 's/keyserver //'); do + ( echo "${keys}" | xargs -n 1 gpg -q ${keyring_args} --recv-keys --keyserver=${keyserver} ) 2>&1 + downloaded_keys=$(gpg --list-keys | grep ^pub | wc -l) + if [[ ${num_keys} = ${downloaded_keys} ]]; then + gpg_ok="true" + break + fi + done + if [ "${gpg_ok}" != "true" ]; then + echo "(*) Failed getting key, retrying in 10s..." + (( retry_count++ )) + sleep 10s + fi + done + set -e + if [ "${gpg_ok}" = "false" ]; then + echo "(!) Failed to get gpg key." + exit 1 + fi +} # Figure out correct version of a three part version number is not passed find_version_from_git_tags() { @@ -124,7 +267,7 @@ find_version_from_git_tags() { local repository=$2 local prefix=${3:-"tags/v"} local separator=${4:-"."} - local last_part_optional=${5:-"false"} + local last_part_optional=${5:-"false"} if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then local escaped_separator=${separator//./\\.} local last_part @@ -150,6 +293,48 @@ find_version_from_git_tags() { echo "${variable_name}=${!variable_name}" } +# Use semver logic to decrement a version number then look for the closest match +find_prev_version_from_git_tags() { + local variable_name=$1 + local current_version=${!variable_name} + local repository=$2 + # Normally a "v" is used before the version number, but support alternate cases + local prefix=${3:-"tags/v"} + # Some repositories use "_" instead of "." for version number part separation, support that + local separator=${4:-"."} + # Some tools release versions that omit the last digit (e.g. go) + local last_part_optional=${5:-"false"} + # Some repositories may have tags that include a suffix (e.g. actions/node-versions) + local version_suffix_regex=$6 + # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios. 
+ set +e + major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')" + minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')" + breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')" + + if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then + ((major=major-1)) + declare -g ${variable_name}="${major}" + # Look for latest version from previous major release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + # Handle situations like Go's odd version pattern where "0" releases omit the last part + elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then + ((minor=minor-1)) + declare -g ${variable_name}="${major}.${minor}" + # Look for latest version from previous minor release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + else + ((breakfix=breakfix-1)) + if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then + declare -g ${variable_name}="${major}.${minor}" + else + declare -g ${variable_name}="${major}.${minor}.${breakfix}" + fi + fi + set -e +} + + # Use Oryx to install something using a partial version match oryx_install() { local platform=$1 @@ -188,42 +373,115 @@ oryx_install() { fi } -apt_get_update() -{ - if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then - echo "Running apt-get update..." - apt-get update -y - fi +pkg_mgr_update() { + case $ADJUSTED_ID in + debian) + if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then + echo "Running apt-get update..." + ${PKG_MGR_CMD} update -y + fi + ;; + rhel) + if [ ${PKG_MGR_CMD} = "microdnf" ]; then + if [ "$(ls /var/cache/yum/* 2>/dev/null | wc -l)" = 0 ]; then + echo "Running ${PKG_MGR_CMD} makecache ..." + ${PKG_MGR_CMD} makecache + fi + else + if [ "$(ls /var/cache/${PKG_MGR_CMD}/* 2>/dev/null | wc -l)" = 0 ]; then + echo "Running ${PKG_MGR_CMD} check-update ..." 
+ set +e + ${PKG_MGR_CMD} check-update + rc=$? + if [ $rc != 0 ] && [ $rc != 100 ]; then + exit 1 + fi + set -e + fi + fi + ;; + esac } # Checks if packages are installed and installs them if not check_packages() { - if ! dpkg -s "$@" > /dev/null 2>&1; then - apt_get_update - apt-get -y install --no-install-recommends "$@" - fi + case ${ADJUSTED_ID} in + debian) + if ! dpkg -s "$@" > /dev/null 2>&1; then + pkg_mgr_update + ${INSTALL_CMD} "$@" + fi + ;; + rhel) + if ! rpm -q "$@" > /dev/null 2>&1; then + pkg_mgr_update + ${INSTALL_CMD} "$@" + fi + ;; + esac } add_symlink() { if [[ ! -d "${CURRENT_PATH}" ]]; then - ln -s -r "${INSTALL_PATH}" "${CURRENT_PATH}" + ln -s -r "${INSTALL_PATH}" "${CURRENT_PATH}" fi if [ "${OVERRIDE_DEFAULT_VERSION}" = "true" ]; then if [[ $(ls -l ${CURRENT_PATH}) != *"-> ${INSTALL_PATH}"* ]] ; then rm "${CURRENT_PATH}" - ln -s -r "${INSTALL_PATH}" "${CURRENT_PATH}" + ln -s -r "${INSTALL_PATH}" "${CURRENT_PATH}" fi fi } +install_openssl3() { + mkdir /tmp/openssl3 + ( + cd /tmp/openssl3 + openssl3_version="3.0" + # Find version using soft match + find_version_from_git_tags openssl3_version "https://github.com/openssl/openssl" "openssl-" + local tgz_filename="openssl-${openssl3_version}.tar.gz" + local tgz_url="https://github.com/openssl/openssl/releases/download/openssl-${openssl3_version}/${tgz_filename}" + echo "Downloading ${tgz_filename}..." + curl -sSL -o "/tmp/openssl3/${tgz_filename}" "${tgz_url}" + tar xzf ${tgz_filename} + cd openssl-${openssl3_version} + ./config --libdir=lib + make -j $(nproc) + make install_dev + ) + rm -rf /tmp/openssl3 +} + +install_prev_vers_cpython() { + VERSION=$1 + echo -e "\n(!) Failed to fetch the latest artifacts for cpython ${VERSION}..." 
+ find_prev_version_from_git_tags VERSION https://github.com/python/cpython + echo -e "\nAttempting to install ${VERSION}" + install_cpython "${VERSION}" +} + +install_cpython() { + VERSION=$1 + INSTALL_PATH="${PYTHON_INSTALL_PATH}/${VERSION}" + + # Check if the specified Python version is already installed + if [ -d "${INSTALL_PATH}" ]; then + echo "(!) Python version ${VERSION} already exists." + else + mkdir -p /tmp/python-src ${INSTALL_PATH} + cd /tmp/python-src + cpython_tgz_filename="Python-${VERSION}.tgz" + cpython_tgz_url="https://www.python.org/ftp/python/${VERSION}/${cpython_tgz_filename}" + echo "Downloading ${cpython_tgz_filename}..." + curl -sSL -o "/tmp/python-src/${cpython_tgz_filename}" "${cpython_tgz_url}" + fi +} + install_from_source() { - VERSION=$1 + VERSION=$1 echo "(*) Building Python ${VERSION} from source..." - # Install prereqs if missing - check_packages curl ca-certificates gnupg2 tar make gcc libssl-dev zlib1g-dev libncurses5-dev \ - libbz2-dev libreadline-dev libxml2-dev xz-utils libgdbm-dev tk-dev dirmngr \ - libxmlsec1-dev libsqlite3-dev libffi-dev liblzma-dev uuid-dev if ! type git > /dev/null 2>&1; then check_packages git fi @@ -231,41 +489,62 @@ install_from_source() { # Find version using soft match find_version_from_git_tags VERSION "https://github.com/python/cpython" - INSTALL_PATH="${PYTHON_INSTALL_PATH}/${VERSION}" - - if [ -d "${INSTALL_PATH}" ]; then - echo "(!) Python version ${VERSION} already exists." - exit 1 - fi - - # Download tgz of source - mkdir -p /tmp/python-src ${INSTALL_PATH} - cd /tmp/python-src - local tgz_filename="Python-${VERSION}.tgz" - local tgz_url="https://www.python.org/ftp/python/${VERSION}/${tgz_filename}" - echo "Downloading ${tgz_filename}..." 
- curl -sSL -o "/tmp/python-src/${tgz_filename}" "${tgz_url}" - + # Some platforms/os versions need modern versions of openssl installed + # via common package repositories, for now rhel-7 family, use case statement to + # make it easy to expand + SSL_INSTALL_PATH="/usr/local" + case ${VERSION_CODENAME} in + centos7|rhel7) + check_packages perl-IPC-Cmd + install_openssl3 + ADDL_CONFIG_ARGS="--with-openssl=${SSL_INSTALL_PATH} --with-openssl-rpath=${SSL_INSTALL_PATH}/lib" + ;; + esac + + install_cpython "${VERSION}" + if [ -f "/tmp/python-src/${cpython_tgz_filename}" ]; then + if grep -q "404 Not Found" "/tmp/python-src/${cpython_tgz_filename}"; then + install_prev_vers_cpython "${VERSION}" + fi + fi; # Verify signature - receive_gpg_keys PYTHON_SOURCE_GPG_KEYS - echo "Downloading ${tgz_filename}.asc..." - curl -sSL -o "/tmp/python-src/${tgz_filename}.asc" "${tgz_url}.asc" - gpg --verify "${tgz_filename}.asc" + if [[ ${VERSION_CODENAME} = "centos7" ]] || [[ ${VERSION_CODENAME} = "rhel7" ]]; then + receive_gpg_keys_centos7 PYTHON_SOURCE_GPG_KEYS + else + receive_gpg_keys PYTHON_SOURCE_GPG_KEYS + fi + echo "Downloading ${cpython_tgz_filename}.asc..." 
+ curl -sSL -o "/tmp/python-src/${cpython_tgz_filename}.asc" "${cpython_tgz_url}.asc" + gpg --verify "${cpython_tgz_filename}.asc" # Update min protocol for testing only - https://bugs.python.org/issue41561 - cp /etc/ssl/openssl.cnf /tmp/python-src/ + if [ -f /etc/pki/tls/openssl.cnf ]; then + cp /etc/pki/tls/openssl.cnf /tmp/python-src/ + else + cp /etc/ssl/openssl.cnf /tmp/python-src/ + fi sed -i -E 's/MinProtocol[=\ ]+.*/MinProtocol = TLSv1.0/g' /tmp/python-src/openssl.cnf export OPENSSL_CONF=/tmp/python-src/openssl.cnf # Untar and build - tar -xzf "/tmp/python-src/${tgz_filename}" -C "/tmp/python-src" --strip-components=1 + tar -xzf "/tmp/python-src/${cpython_tgz_filename}" -C "/tmp/python-src" --strip-components=1 local config_args="" if [ "${OPTIMIZE_BUILD_FROM_SOURCE}" = "true" ]; then - config_args="--enable-optimizations" + config_args="${config_args} --enable-optimizations" + fi + if [ "${ENABLESHARED}" = "true" ]; then + config_args=" ${config_args} --enable-shared" + # need double-$: LDFLAGS ends up in Makefile $$ becomes $ when evaluated. + # backslash needed for shell that Make calls escape the $. + export LDFLAGS="${LDFLAGS} -Wl,-rpath="'\$$ORIGIN'"/../lib" + fi + if [ -n "${ADDL_CONFIG_ARGS}" ]; then + config_args="${config_args} ${ADDL_CONFIG_ARGS}" fi ./configure --prefix="${INSTALL_PATH}" --with-ensurepip=install ${config_args} make -j 8 make install + cd /tmp rm -rf /tmp/python-src ${GNUPGHOME} /tmp/vscdc-settings.env @@ -280,23 +559,23 @@ install_from_source() { } install_using_oryx() { - VERSION=$1 + VERSION=$1 INSTALL_PATH="${PYTHON_INSTALL_PATH}/${VERSION}" - + + # Check if the specified Python version is already installed if [ -d "${INSTALL_PATH}" ]; then echo "(!) Python version ${VERSION} already exists." 
- exit 1 - fi - - # The python install root path may not exist, so create it - mkdir -p "${PYTHON_INSTALL_PATH}" - oryx_install "python" "${VERSION}" "${INSTALL_PATH}" "lib" || return 1 + else + # The python install root path may not exist, so create it + mkdir -p "${PYTHON_INSTALL_PATH}" + oryx_install "python" "${VERSION}" "${INSTALL_PATH}" "lib" || return 1 - ln -s "${INSTALL_PATH}/bin/idle3" "${INSTALL_PATH}/bin/idle" - ln -s "${INSTALL_PATH}/bin/pydoc3" "${INSTALL_PATH}/bin/pydoc" - ln -s "${INSTALL_PATH}/bin/python3-config" "${INSTALL_PATH}/bin/python-config" + ln -s "${INSTALL_PATH}/bin/idle3" "${INSTALL_PATH}/bin/idle" + ln -s "${INSTALL_PATH}/bin/pydoc3" "${INSTALL_PATH}/bin/pydoc" + ln -s "${INSTALL_PATH}/bin/python3-config" "${INSTALL_PATH}/bin/python-config" - add_symlink + add_symlink + fi } sudo_if() { @@ -304,32 +583,46 @@ sudo_if() { if [ "$(id -u)" -eq 0 ] && [ "$USERNAME" != "root" ]; then su - "$USERNAME" -c "$COMMAND" else - "$COMMAND" + $COMMAND fi } install_user_package() { - PACKAGE="$1" - sudo_if "${PYTHON_SRC}" -m pip install --user --upgrade --no-cache-dir "$PACKAGE" + INSTALL_UNDER_ROOT="$1" + PACKAGE="$2" + + if [ "$INSTALL_UNDER_ROOT" = true ]; then + sudo_if "${PYTHON_SRC}" -m pip install --upgrade --no-cache-dir "$PACKAGE" + else + sudo_if "${PYTHON_SRC}" -m pip install --user --upgrade --no-cache-dir "$PACKAGE" + fi } add_user_jupyter_config() { - CONFIG_DIR="/home/$USERNAME/.jupyter" - CONFIG_FILE="$CONFIG_DIR/jupyter_server_config.py" + CONFIG_DIR="$1" + CONFIG_FILE="$2" # Make sure the config file exists or create it with proper permissions test -d "$CONFIG_DIR" || sudo_if mkdir "$CONFIG_DIR" test -f "$CONFIG_FILE" || sudo_if touch "$CONFIG_FILE" # Don't write the same config more than once - grep -q "$1" "$CONFIG_FILE" || echo "$1" >> "$CONFIG_FILE" + grep -q "$3" "$CONFIG_FILE" || echo "$3" >> "$CONFIG_FILE" } install_python() { version=$1 # If the os-provided versions are "good enough", detect that and bail out. 
if [ ${version} = "os-provided" ] || [ ${version} = "system" ]; then - check_packages python3 python3-doc python3-pip python3-venv python3-dev python3-tk + if [ ${ADJUSTED_ID} = "debian" ]; then + check_packages python3 python3-doc python3-pip python3-venv python3-dev python3-tk + else + if [ ${ID} != "mariner" ]; then + check_packages python3 python3-pip python3-devel python3-tkinter + else + check_packages python3 python3-pip python3-devel + fi + fi INSTALL_PATH="/usr" local current_bin_path="${CURRENT_PATH}/bin" @@ -348,7 +641,7 @@ install_python() { fi should_install_from_source=false - elif [ "$(dpkg --print-architecture)" = "amd64" ] && [ "${USE_ORYX_IF_AVAILABLE}" = "true" ] && type oryx > /dev/null 2>&1; then + elif [ ${ADJUSTED_ID} = "debian" ] && [ "$(dpkg --print-architecture)" = "amd64" ] && [ "${USE_ORYX_IF_AVAILABLE}" = "true" ] && type oryx > /dev/null 2>&1; then install_using_oryx $version || should_install_from_source=true else should_install_from_source=true @@ -358,14 +651,126 @@ install_python() { fi } +python_is_externally_managed() { + local _python_cmd=$1 + local python_stdlib_dir=$( + ${_python_cmd} -c ' +import sys +import sysconfig +sys.prefix == sys.base_prefix and print(sysconfig.get_path("stdlib", sysconfig.get_default_scheme()))' + ) + if [ -f ${python_stdlib_dir}/EXTERNALLY-MANAGED ]; then + return 0 + else + return 1 + fi +} + +# Ensure that login shells get the correct path if the user updated the PATH using ENV. +rm -f /etc/profile.d/00-restore-env.sh +echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh +chmod +x /etc/profile.d/00-restore-env.sh + +# Some distributions do not install awk by default (e.g. Mariner) +if ! 
type awk >/dev/null 2>&1; then + check_packages awk +fi + +# Determine the appropriate non-root user +if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then + USERNAME="" + POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)") + for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do + if id -u ${CURRENT_USER} > /dev/null 2>&1; then + USERNAME=${CURRENT_USER} + break + fi + done + if [ "${USERNAME}" = "" ]; then + USERNAME=root + fi +elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then + USERNAME=root +fi + # Ensure apt is in non-interactive to avoid prompts export DEBIAN_FRONTEND=noninteractive # General requirements -check_packages curl ca-certificates gnupg2 tar make gcc libssl-dev zlib1g-dev libncurses5-dev \ - libbz2-dev libreadline-dev libxml2-dev xz-utils libgdbm-dev tk-dev dirmngr \ - libxmlsec1-dev libsqlite3-dev libffi-dev liblzma-dev uuid-dev +REQUIRED_PKGS="" +case ${ADJUSTED_ID} in + debian) + REQUIRED_PKGS="${REQUIRED_PKGS} \ + ca-certificates \ + curl \ + dirmngr \ + gcc \ + gnupg2 \ + libbz2-dev \ + libffi-dev \ + libgdbm-dev \ + liblzma-dev \ + libncurses5-dev \ + libreadline-dev \ + libsqlite3-dev \ + libssl-dev \ + libxml2-dev \ + libxmlsec1-dev \ + make \ + tar \ + tk-dev \ + uuid-dev \ + xz-utils \ + zlib1g-dev" + ;; + rhel) + REQUIRED_PKGS="${REQUIRED_PKGS} \ + bzip2-devel \ + ca-certificates \ + findutils \ + gcc \ + gnupg2 \ + libffi-devel \ + libxml2-devel \ + make \ + ncurses-devel \ + openssl-devel \ + shadow-utils \ + sqlite-devel \ + tar \ + which \ + xz-devel \ + xz \ + zlib-devel" + if ! 
type curl >/dev/null 2>&1; then + REQUIRED_PKGS="${REQUIRED_PKGS} \ + curl" + fi + # Mariner does not have tk-devel package available, RedHat ubi8 and ubi9 do not have tk-devel + if [ ${ID} != "mariner" ] && [ ${ID} != "rhel" ]; then + REQUIRED_PKGS="${REQUIRED_PKGS} \ + tk-devel" + fi + # Redhat ubi8 and ubi9 do not have some packages by default, only add them + # if we're not on RedHat ... + if [ ${ID} != "rhel" ]; then + REQUIRED_PKGS="${REQUIRED_PKGS} \ + gdbm-devel \ + readline-devel \ + uuid-devel \ + xmlsec1-devel" + fi + ;; +esac + +check_packages ${REQUIRED_PKGS} + +# Function to get the major version from a SemVer string +get_major_version() { + local version="$1" + echo "$version" | cut -d '.' -f 1 +} # Install Python from source if needed if [ "${PYTHON_VERSION}" != "none" ]; then @@ -373,9 +778,7 @@ if [ "${PYTHON_VERSION}" != "none" ]; then groupadd -r python fi usermod -a -G python "${USERNAME}" - CURRENT_PATH="${PYTHON_INSTALL_PATH}/current" - install_python ${PYTHON_VERSION} # Additional python versions to be installed but not be set as default. 
@@ -384,9 +787,27 @@ if [ "${PYTHON_VERSION}" != "none" ]; then OLDIFS=$IFS IFS="," read -a additional_versions <<< "$ADDITIONAL_VERSIONS" - for version in "${additional_versions[@]}"; do + major_version=$(get_major_version ${VERSION}) + if type apt-get > /dev/null 2>&1; then + # Debian/Ubuntu: Use update-alternatives + update-alternatives --install ${CURRENT_PATH} python${major_version} ${PYTHON_INSTALL_PATH}/${VERSION} $((${#additional_versions[@]}+1)) + update-alternatives --set python${major_version} ${PYTHON_INSTALL_PATH}/${VERSION} + elif type dnf > /dev/null 2>&1 || type yum > /dev/null 2>&1 || type microdnf > /dev/null 2>&1; then + # Fedora/RHEL/CentOS: Use alternatives + alternatives --install ${CURRENT_PATH} python${major_version} ${PYTHON_INSTALL_PATH}/${VERSION} $((${#additional_versions[@]}+1)) + alternatives --set python${major_version} ${PYTHON_INSTALL_PATH}/${VERSION} + fi + for i in "${!additional_versions[@]}"; do + version=${additional_versions[$i]} OVERRIDE_DEFAULT_VERSION="false" install_python $version + if type apt-get > /dev/null 2>&1; then + # Debian/Ubuntu: Use update-alternatives + update-alternatives --install ${CURRENT_PATH} python${major_version} ${PYTHON_INSTALL_PATH}/${VERSION} $((${i}+1)) + elif type dnf > /dev/null 2>&1 || type yum > /dev/null 2>&1 || type microdnf > /dev/null 2>&1; then + # Fedora/RHEL/CentOS: Use alternatives + alternatives --install ${CURRENT_PATH} python${major_version} ${PYTHON_INSTALL_PATH}/${VERSION} $((${i}+1)) + fi done INSTALL_PATH="${OLD_INSTALL_PATH}" IFS=$OLDIFS @@ -396,19 +817,22 @@ if [ "${PYTHON_VERSION}" != "none" ]; then updaterc "if [[ \"\${PATH}\" != *\"${CURRENT_PATH}/bin\"* ]]; then export PATH=${CURRENT_PATH}/bin:\${PATH}; fi" PATH="${INSTALL_PATH}/bin:${PATH}" fi - + # Updates the symlinks for os-provided, or the installed python version in other cases chown -R "${USERNAME}:python" "${PYTHON_INSTALL_PATH}" chmod -R g+r+w "${PYTHON_INSTALL_PATH}" find "${PYTHON_INSTALL_PATH}" -type d -print0 
| xargs -0 -n 1 chmod g+s PYTHON_SRC="${INSTALL_PATH}/bin/python3" + if ! type pip >/dev/null 2>&1 && type pip3 >/dev/null 2>&1; then + ln -s /usr/bin/pip3 /usr/bin/pip + fi else PYTHON_SRC=$(which python) fi # Install Python tools if needed -if [[ "${INSTALL_PYTHON_TOOLS}" = "true" ]] && [[ $(python --version) != "" ]]; then +if [[ "${INSTALL_PYTHON_TOOLS}" = "true" ]] && [[ -n "${PYTHON_SRC}" ]]; then echo 'Installing Python tools...' export PIPX_BIN_DIR="${PIPX_HOME}/bin" PATH="${PATH}:${PIPX_BIN_DIR}" @@ -421,24 +845,27 @@ if [[ "${INSTALL_PYTHON_TOOLS}" = "true" ]] && [[ $(python --version) != "" ]]; umask 0002 mkdir -p ${PIPX_BIN_DIR} chown -R "${USERNAME}:pipx" ${PIPX_HOME} - chmod -R g+r+w "${PIPX_HOME}" + chmod -R g+r+w "${PIPX_HOME}" find "${PIPX_HOME}" -type d -print0 | xargs -0 -n 1 chmod g+s # Update pip if not using os provided python - if [[ $(python --version) != "" ]] || [[ ${PYTHON_VERSION} != "os-provided" ]] && [[ ${PYTHON_VERSION} != "system" ]] && [[ ${PYTHON_VERSION} != "none" ]]; then + if [[ -n "${PYTHON_SRC}" ]] && [[ ${PYTHON_VERSION} != "os-provided" ]] && [[ ${PYTHON_VERSION} != "system" ]] && [[ ${PYTHON_VERSION} != "none" ]]; then echo "Updating pip..." - python -m pip install --no-cache-dir --upgrade pip + ${PYTHON_SRC} -m pip install --no-cache-dir --upgrade pip fi # Install tools - echo "Installing Python tools..." export PYTHONUSERBASE=/tmp/pip-tmp export PIP_CACHE_DIR=/tmp/pip-tmp/cache PIPX_DIR="" if ! type pipx > /dev/null 2>&1; then - pip3 install --disable-pip-version-check --no-cache-dir --user pipx 2>&1 - /tmp/pip-tmp/bin/pipx install --pip-args=--no-cache-dir pipx - PIPX_DIR="/tmp/pip-tmp/bin/" + if python_is_externally_managed ${PYTHON_SRC}; then + check_packages pipx + else + pip3 install --disable-pip-version-check --no-cache-dir --user pipx 2>&1 + /tmp/pip-tmp/bin/pipx install --pip-args=--no-cache-dir pipx + PIPX_DIR="/tmp/pip-tmp/bin/" + fi fi for util in "${DEFAULT_UTILS[@]}"; do if ! 
type ${util} > /dev/null 2>&1; then @@ -447,6 +874,44 @@ if [[ "${INSTALL_PYTHON_TOOLS}" = "true" ]] && [[ $(python --version) != "" ]]; echo "${util} already installed. Skipping." fi done + + # Temporary: Removes “setup tools” metadata directory due to https://github.com/advisories/GHSA-r9hx-vwmv-q579 + if [[ $SKIP_VULNERABILITY_PATCHING = "false" ]]; then + VULNERABLE_VERSIONS=("3.10" "3.11") + RUN_TIME_PY_VER_DETECT=$(${PYTHON_SRC} --version 2>&1) + PY_MAJOR_MINOR_VER=${RUN_TIME_PY_VER_DETECT:7:4}; + if [[ ${VULNERABLE_VERSIONS[*]} =~ $PY_MAJOR_MINOR_VER ]]; then + rm -rf ${PIPX_HOME}/shared/lib/"python${PY_MAJOR_MINOR_VER}"/site-packages/setuptools-65.5.0.dist-info + if [[ -e "/usr/local/lib/python${PY_MAJOR_MINOR_VER}/ensurepip/_bundled/setuptools-65.5.0-py3-none-any.whl" ]]; then + # remove the vulnerable setuptools-65.5.0-py3-none-any.whl file + rm /usr/local/lib/python${PY_MAJOR_MINOR_VER}/ensurepip/_bundled/setuptools-65.5.0-py3-none-any.whl + # create and change to the setuptools_downloaded directory + mkdir -p /tmp/setuptools_downloaded + cd /tmp/setuptools_downloaded + # download the source distribution for setuptools using pip + pip download setuptools==65.5.1 --no-binary :all: + # extract the filename of the setuptools-*.tar.gz file + filename=$(find . 
-maxdepth 1 -type f) + # create a directory to store unpacked contents of the source distribution + mkdir -p /tmp/setuptools_src_dist + # extract the contents inside the new directory + tar -xzf $filename -C /tmp/setuptools_src_dist + # move to the setuptools-* directory inside /setuptools_src_dist + cd /tmp/setuptools_src_dist/setuptools-65.5.1/ + # look for setup.py file in the current directory and create a wheel file + python setup.py bdist_wheel + # move inside the dist directory in pwd + cd dist + # copy this file to the ensurepip/_bundled directory + cp setuptools-65.5.1-py3-none-any.whl /usr/local/lib/python${PY_MAJOR_MINOR_VER}/ensurepip/_bundled/ + # replace the version in __init__.py file with the installed version + sed -i 's/_SETUPTOOLS_VERSION = \"65\.5\.0\"/_SETUPTOOLS_VERSION = "65.5.1"/g' /usr/local/lib/"python${PY_MAJOR_MINOR_VER}"/ensurepip/__init__.py + # cleanup created dir's + rm -rf /tmp/setuptools_downloaded /tmp/setuptools_src_dist + fi + fi + fi + rm -rf /tmp/pip-tmp updaterc "export PIPX_HOME=\"${PIPX_HOME}\"" @@ -461,17 +926,41 @@ if [ "${INSTALL_JUPYTERLAB}" = "true" ]; then exit 1 fi - install_user_package jupyterlab - install_user_package jupyterlab-git + INSTALL_UNDER_ROOT=true + if [ "$(id -u)" -eq 0 ] && [ "$USERNAME" != "root" ]; then + INSTALL_UNDER_ROOT=false + fi + + install_user_package $INSTALL_UNDER_ROOT jupyterlab + install_user_package $INSTALL_UNDER_ROOT jupyterlab-git + + # Create a symlink to the JupyterLab binary for non root users + if [ "$INSTALL_UNDER_ROOT" = false ]; then + JUPYTER_INPATH=/home/${USERNAME}/.local/bin + if [ ! -d "$JUPYTER_INPATH" ]; then + echo "Error: $JUPYTER_INPATH does not exist." 
+ exit 1 + fi + JUPYTER_PATH=/usr/local/jupyter + ln -s "$JUPYTER_INPATH" "$JUPYTER_PATH" + fi # Configure JupyterLab if needed if [ -n "${CONFIGURE_JUPYTERLAB_ALLOW_ORIGIN}" ]; then - add_user_jupyter_config "c.ServerApp.allow_origin = '${CONFIGURE_JUPYTERLAB_ALLOW_ORIGIN}'" - add_user_jupyter_config "c.NotebookApp.allow_origin = '${CONFIGURE_JUPYTERLAB_ALLOW_ORIGIN}'" + # Resolve config directory + CONFIG_DIR="/root/.jupyter" + if [ "$INSTALL_UNDER_ROOT" = false ]; then + CONFIG_DIR="/home/$USERNAME/.jupyter" + fi + + CONFIG_FILE="$CONFIG_DIR/jupyter_server_config.py" + + add_user_jupyter_config $CONFIG_DIR $CONFIG_FILE "c.ServerApp.allow_origin = '${CONFIGURE_JUPYTERLAB_ALLOW_ORIGIN}'" + add_user_jupyter_config $CONFIG_DIR $CONFIG_FILE "c.NotebookApp.allow_origin = '${CONFIGURE_JUPYTERLAB_ALLOW_ORIGIN}'" fi fi # Clean up -rm -rf /var/lib/apt/lists/* +clean_up echo "Done!" diff --git a/src/ruby/README.md b/src/ruby/README.md index 8184036c6..7df6965c7 100644 --- a/src/ruby/README.md +++ b/src/ruby/README.md @@ -21,7 +21,6 @@ Installs Ruby, rvm, rbenv, common Ruby utilities, and needed dependencies. 
### VS Code Extensions -- `rebornix.Ruby` - `shopify.ruby-lsp` diff --git a/src/ruby/devcontainer-feature.json b/src/ruby/devcontainer-feature.json index 29591e6a6..661c46bf0 100644 --- a/src/ruby/devcontainer-feature.json +++ b/src/ruby/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "ruby", - "version": "1.1.1", + "version": "1.3.2", "name": "Ruby (via rvm)", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/ruby", "description": "Installs Ruby, rvm, rbenv, common Ruby utilities, and needed dependencies.", @@ -10,9 +10,8 @@ "proposals": [ "latest", "none", - "3.1", - "3.0", - "2.7" + "3.4", + "3.2" ], "default": "latest", "description": "Select or enter a Ruby version to install" @@ -21,9 +20,15 @@ "customizations": { "vscode": { "extensions": [ - "rebornix.Ruby", "shopify.ruby-lsp" - ] + ], + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes Ruby, rvm, rbenv, common Ruby utilities, and needed dependencies pre-installed and available on the `PATH`, along with the Ruby language extension for Ruby development." 
+ } + ] + } } }, "containerEnv": { diff --git a/src/ruby/install.sh b/src/ruby/install.sh index 2b47d8a27..39cb5be03 100755 --- a/src/ruby/install.sh +++ b/src/ruby/install.sh @@ -23,9 +23,6 @@ ADDITIONAL_VERSIONS="${ADDITIONALVERSIONS:-""}" DEFAULT_GEMS="rake" RVM_GPG_KEYS="409B6B1796C275462A1703113804BB82D39DC0E3 7D2BAF1CF37B13E2069D6956105BD0E739499BDB" -GPG_KEY_SERVERS="keyserver hkp://keyserver.ubuntu.com -keyserver hkps://keys.openpgp.org -keyserver hkp://keyserver.pgp.com" set -e @@ -71,6 +68,38 @@ updaterc() { fi } +# Get the list of GPG key servers that are reachable +get_gpg_key_servers() { + declare -A keyservers_curl_map=( + ["hkp://keyserver.ubuntu.com"]="http://keyserver.ubuntu.com:11371" + ["hkp://keyserver.ubuntu.com:80"]="http://keyserver.ubuntu.com" + ["hkps://keys.openpgp.org"]="https://keys.openpgp.org" + ["hkp://keyserver.pgp.com"]="http://keyserver.pgp.com:11371" + ) + + local curl_args="" + local keyserver_reachable=false # Flag to indicate if any keyserver is reachable + + if [ ! -z "${KEYSERVER_PROXY}" ]; then + curl_args="--proxy ${KEYSERVER_PROXY}" + fi + + for keyserver in "${!keyservers_curl_map[@]}"; do + local keyserver_curl_url="${keyservers_curl_map[${keyserver}]}" + if curl -s ${curl_args} --max-time 5 ${keyserver_curl_url} > /dev/null; then + echo "keyserver ${keyserver}" + keyserver_reachable=true + else + echo "(*) Keyserver ${keyserver} is not reachable." >&2 + fi + done + + if ! $keyserver_reachable; then + echo "(!) No keyserver is reachable." >&2 + exit 1 + fi +} + # Import the specified key in a variable name passed in as receive_gpg_keys() { local keys=${!1} @@ -79,11 +108,16 @@ receive_gpg_keys() { keyring_args="--no-default-keyring --keyring \"$2\"" fi + # Install curl + if ! 
type curl > /dev/null 2>&1; then + check_packages curl + fi + # Use a temporary location for gpg keys to avoid polluting image export GNUPGHOME="/tmp/tmp-gnupg" mkdir -p ${GNUPGHOME} chmod 700 ${GNUPGHOME} - echo -e "disable-ipv6\n${GPG_KEY_SERVERS}" > ${GNUPGHOME}/dirmngr.conf + echo -e "disable-ipv6\n$(get_gpg_key_servers)" > ${GNUPGHOME}/dirmngr.conf # GPG key download sometimes fails for some reason and retrying fixes it. local retry_count=0 local gpg_ok="false" @@ -93,7 +127,7 @@ receive_gpg_keys() { echo "(*) Downloading GPG key..." ( echo "${keys}" | xargs -n 1 gpg -q ${keyring_args} --recv-keys) 2>&1 && gpg_ok="true" if [ "${gpg_ok}" != "true" ]; then - echo "(*) Failed getting key, retring in 10s..." + echo "(*) Failed getting key, retrying in 10s..." (( retry_count++ )) sleep 10s fi @@ -139,6 +173,47 @@ find_version_from_git_tags() { echo "${variable_name}=${!variable_name}" } +# Use semver logic to decrement a version number then look for the closest match +find_prev_version_from_git_tags() { + local variable_name=$1 + local current_version=${!variable_name} + local repository=$2 + # Normally a "v" is used before the version number, but support alternate cases + local prefix=${3:-"tags/v"} + # Some repositories use "_" instead of "." for version number part separation, support that + local separator=${4:-"."} + # Some tools release versions that omit the last digit (e.g. go) + local last_part_optional=${5:-"false"} + # Some repositories may have tags that include a suffix (e.g. actions/node-versions) + local version_suffix_regex=$6 + # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios. 
+ set +e + major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')" + minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')" + breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')" + + if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then + ((major=major-1)) + declare -g ${variable_name}="${major}" + # Look for latest version from previous major release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + # Handle situations like Go's odd version pattern where "0" releases omit the last part + elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then + ((minor=minor-1)) + declare -g ${variable_name}="${major}.${minor}" + # Look for latest version from previous minor release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + else + ((breakfix=breakfix-1)) + if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then + declare -g ${variable_name}="${major}.${minor}" + else + declare -g ${variable_name}="${major}.${minor}.${breakfix}" + fi + fi + set -e +} + apt_get_update() { if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then @@ -165,32 +240,69 @@ if [ "${architecture}" != "amd64" ] && [ "${architecture}" != "x86_64" ] && [ "$ fi # Install dependencies -check_packages curl ca-certificates software-properties-common build-essential gnupg2 libreadline-dev \ +# Removed software-properties-common package from here as it has been removed for debian trixie(13) +check_packages curl ca-certificates build-essential gnupg2 libreadline-dev \ procps dirmngr gawk autoconf automake bison libffi-dev libgdbm-dev libncurses5-dev \ libsqlite3-dev libtool libyaml-dev pkg-config sqlite3 zlib1g-dev libgmp-dev libssl-dev if ! 
type git > /dev/null 2>&1; then check_packages git fi +# Conditionally install software-properties-common (skip on Debian Trixie) +if type apt-get >/dev/null 2>&1; then + if [ -f /etc/os-release ]; then + . /etc/os-release + if [ "${ID}" = "debian" ] && [ "${VERSION_CODENAME}" = "trixie" ]; then + echo "Skipping software-properties-common on Debian Trixie." + else + check_packages software-properties-common + fi + else + # Fallback for apt-based systems without /etc/os-release + check_packages software-properties-common + fi +fi + +# Function to fetch the version released prior to the latest version +get_previous_version() { + local url=$1 + local repo_url=$2 + variable_name=$3 + prev_version=${!variable_name} + + output=$(curl -s "$repo_url"); + + #install jq + check_packages jq + + message=$(echo "$output" | jq -r '.message') + + if [[ $message == "API rate limit exceeded"* ]]; then + echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}" + echo -e "\nAttempting to find latest version using GitHub tags." + find_prev_version_from_git_tags prev_version "$url" "tags/v" "_" + declare -g ${variable_name}="${prev_version}" + else + echo -e "\nAttempting to find latest version using GitHub Api." + version=$(echo "$output" | jq -r '.tag_name' | tr '_' '.') + declare -g ${variable_name}="${version#v}" + fi + echo "${variable_name}=${!variable_name}" +} + +get_github_api_repo_url() { + local url=$1 + echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases/latest" +} + # Figure out correct version of a three part version number is not passed -find_version_from_git_tags RUBY_VERSION "https://github.com/ruby/ruby" "tags/v" "_" +RUBY_URL="https://github.com/ruby/ruby" +ORIGINAL_RUBY_VERSION=$RUBY_VERSION +find_version_from_git_tags RUBY_VERSION $RUBY_URL "tags/v" "_" -# Just install Ruby if RVM already installed -if rvm --version > /dev/null; then - echo "Ruby Version Manager already exists." 
- if [[ "$(ruby -v)" = *"${RUBY_VERSION}"* ]]; then - echo "(!) Ruby is already installed with version ${RUBY_VERSION}. Skipping..." - elif [ "${RUBY_VERSION}" != "none" ]; then - echo "Installing specified Ruby version." - su ${USERNAME} -c "rvm install ruby ${RUBY_VERSION}" - fi - SKIP_GEM_INSTALL="false" - SKIP_RBENV_RBUILD="true" -else - # Install RVM - receive_gpg_keys RVM_GPG_KEYS - # Determine appropriate settings for rvm installer +set_rvm_install_args() { + RUBY_VERSION=$1 if [ "${RUBY_VERSION}" = "none" ]; then RVM_INSTALL_ARGS="" elif [[ "$(ruby -v)" = *"${RUBY_VERSION}"* ]]; then @@ -209,19 +321,48 @@ else DEFAULT_GEMS="" fi fi +} + +install_previous_version() { + if [[ $ORIGINAL_RUBY_VERSION == "latest" ]]; then + repo_url=$(get_github_api_repo_url "$RUBY_URL") + get_previous_version "${RUBY_URL}" "${repo_url}" RUBY_VERSION + set_rvm_install_args $RUBY_VERSION + curl -sSL https://get.rvm.io | bash -s stable --ignore-dotfiles ${RVM_INSTALL_ARGS} --with-default-gems="${DEFAULT_GEMS}" 2>&1 + else + echo "Failed to install Ruby version $ORIGINAL_RUBY_VERSION. Exiting..." + fi +} + +# Just install Ruby if RVM already installed +if rvm --version > /dev/null; then + echo "Ruby Version Manager already exists." + if [[ "$(ruby -v)" = *"${RUBY_VERSION}"* ]]; then + echo "(!) Ruby is already installed with version ${RUBY_VERSION}. Skipping..." + elif [ "${RUBY_VERSION}" != "none" ]; then + echo "Installing specified Ruby version." + su ${USERNAME} -c "rvm install ruby ${RUBY_VERSION}" + fi + SKIP_GEM_INSTALL="false" + SKIP_RBENV_RBUILD="true" +else + # Install RVM + receive_gpg_keys RVM_GPG_KEYS + # Determine appropriate settings for rvm installer + set_rvm_install_args $RUBY_VERSION # Create rvm group as a system group to reduce the odds of conflict with local user UIDs if ! 
cat /etc/group | grep -e "^rvm:" > /dev/null 2>&1; then groupadd -r rvm fi # Install rvm - curl -sSL https://get.rvm.io | bash -s stable --ignore-dotfiles ${RVM_INSTALL_ARGS} --with-default-gems="${DEFAULT_GEMS}" 2>&1 + curl -sSL https://get.rvm.io | bash -s stable --ignore-dotfiles ${RVM_INSTALL_ARGS} --with-default-gems="${DEFAULT_GEMS}" 2>&1 || install_previous_version usermod -aG rvm ${USERNAME} source /usr/local/rvm/scripts/rvm rvm fix-permissions system rm -rf ${GNUPGHOME} fi -if [ "${INSTALL_RUBY_TOOLS}" = "true" ]; then +if [ "${INSTALL_RUBY_TOOLS}" = "true" ]; then # Non-root user may not have "gem" in path when script is run and no ruby version # is installed by rvm, so handle this by using root's default gem in this case ROOT_GEM="$(which gem || echo "")" @@ -238,7 +379,7 @@ if [ ! -z "${ADDITIONAL_VERSIONS}" ]; then read -a additional_versions <<< "$ADDITIONAL_VERSIONS" for version in "${additional_versions[@]}"; do # Figure out correct version of a three part version number is not passed - find_version_from_git_tags version "https://github.com/ruby/ruby" "tags/v" "_" + find_version_from_git_tags version $RUBY_URL "tags/v" "_" source /usr/local/rvm/scripts/rvm rvm install ruby ${version} done diff --git a/src/rust/NOTES.md b/src/rust/NOTES.md index 19fe92f31..1f01e6e52 100644 --- a/src/rust/NOTES.md +++ b/src/rust/NOTES.md @@ -2,6 +2,11 @@ ## OS Support -This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. +This Feature should work on recent versions of Debian/Ubuntu, RedHat Enterprise Linux, Fedora, Alma, RockyLinux +and Mariner distributions with the `apt`, `yum`, `dnf`, `microdnf` and `tdnf` package managers installed. + + +**Note:** Alpine is not supported because the rustup-init binary requires glibc to run, but Alpine Linux does not include `glibc` +by default. Instead, it uses musl libc, which is not binary-compatible with glibc. 
`bash` is required to execute the `install.sh` script. diff --git a/src/rust/README.md b/src/rust/README.md index 974d78516..ef0bc2311 100644 --- a/src/rust/README.md +++ b/src/rust/README.md @@ -17,6 +17,8 @@ Installs Rust, common Rust utilities, and their required dependencies |-----|-----|-----|-----| | version | Select or enter a version of Rust to install. | string | latest | | profile | Select a rustup install profile. | string | minimal | +| targets | Optional comma separated list of additional Rust targets to install. | string | - | +| components | Optional, comma separated list of Rust components to be installed | string | rust-analyzer,rust-src,rustfmt,clippy | ## Customizations @@ -25,13 +27,17 @@ Installs Rust, common Rust utilities, and their required dependencies - `vadimcn.vscode-lldb` - `rust-lang.rust-analyzer` - `tamasfe.even-better-toml` -- `serayuzgur.crates` ## OS Support -This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. +This Feature should work on recent versions of Debian/Ubuntu, RedHat Enterprise Linux, Fedora, Alma, RockyLinux +and Mariner distributions with the `apt`, `yum`, `dnf`, `microdnf` and `tdnf` package managers installed. + + +**Note:** Alpine is not supported because the rustup-init binary requires glibc to run, but Alpine Linux does not include `glibc` +by default. Instead, it uses musl libc, which is not binary-compatible with glibc. 
diff --git a/src/rust/devcontainer-feature.json b/src/rust/devcontainer-feature.json index 70ed39eab..88b64daed 100644 --- a/src/rust/devcontainer-feature.json +++ b/src/rust/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "rust", - "version": "1.1.0", + "version": "1.5.0", "name": "Rust", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/rust", "description": "Installs Rust, common Rust utilities, and their required dependencies", @@ -10,16 +10,30 @@ "proposals": [ "latest", "none", + "1.87", + "1.86", + "1.85", + "1.84", + "1.83", + "1.82", + "1.81", + "1.80", + "1.79", + "1.78", + "1.77", + "1.76", + "1.75", + "1.74", + "1.73", + "1.72", + "1.71", "1.70", "1.69", "1.68", "1.67", "1.66", "1.65", - "1.64", - "1.63", - "1.62", - "1.61" + "1.64" ], "default": "latest", "description": "Select or enter a version of Rust to install." @@ -33,20 +47,45 @@ ], "default": "minimal", "description": "Select a rustup install profile." - } + }, + "targets": { + "type": "string", + "default": "", + "description": "Optional comma separated list of additional Rust targets to install.", + "proposals": [ + "aarch64-unknown-linux-gnu", + "armv7-unknown-linux-gnueabihf", + "x86_64-unknown-redox,x86_64-unknown-uefi" + ] + }, + "components": { + "type": "string", + "default": "rust-analyzer,rust-src,rustfmt,clippy", + "description": "Optional, comma separated list of Rust components to be installed", + "proposals": [ + "rust-analyzer,rust-src,rustfmt,clippy", + "rust-analyzer,rust-src", + "rustfmt,clippy,rust-docs", + "llvm-tools-preview,rust-src,rustfmt" + ] + } }, "customizations": { "vscode": { "extensions": [ "vadimcn.vscode-lldb", "rust-lang.rust-analyzer", - "tamasfe.even-better-toml", - "serayuzgur.crates" + "tamasfe.even-better-toml" ], "settings": { "files.watcherExclude": { "**/target/**": true - } + }, + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes Rust, common Rust utilities, and needed 
dependencies pre-installed and available on the `PATH`, along with the Rust language extension for Rust development." + } + ] } } }, diff --git a/src/rust/install.sh b/src/rust/install.sh index 1a309923a..99a7ba8f5 100755 --- a/src/rust/install.sh +++ b/src/rust/install.sh @@ -9,6 +9,8 @@ RUST_VERSION="${VERSION:-"latest"}" RUSTUP_PROFILE="${PROFILE:-"minimal"}" +RUSTUP_TARGETS="${TARGETS:-""}" +IFS=',' read -ra components <<< "${COMPONENTS:-rust-analyzer,rust-src,rustfmt,clippy}" export CARGO_HOME="${CARGO_HOME:-"/usr/local/cargo"}" export RUSTUP_HOME="${RUSTUP_HOME:-"/usr/local/rustup"}" @@ -18,8 +20,76 @@ UPDATE_RUST="${UPDATE_RUST:-"false"}" set -e -# Clean up -rm -rf /var/lib/apt/lists/* +# Detect the Linux distribution and package manager +PKG_MANAGER="" + +# Bring in ID, ID_LIKE, VERSION_ID, VERSION_CODENAME +. /etc/os-release +# Get an adjusted ID independent of distro variants +if [ "${ID}" = "debian" ] || [ "${ID_LIKE}" = "debian" ]; then + ADJUSTED_ID="debian" +elif [ "${ID}" = "alpine" ]; then + ADJUSTED_ID="alpine" +elif [[ "${ID}" = "rhel" || "${ID}" = "fedora" || "${ID}" = "mariner" || "${ID_LIKE}" = *"rhel"* || "${ID_LIKE}" = *"fedora"* || "${ID_LIKE}" = *"mariner"* ]]; then + ADJUSTED_ID="rhel" + VERSION_CODENAME="${ID}${VERSION_ID}" +else + echo "Linux distro ${ID} not supported." + exit 1 +fi + +if [ "${ADJUSTED_ID}" = "rhel" ] && [ "${VERSION_CODENAME-}" = "centos7" ]; then + # As of 1 July 2024, mirrorlist.centos.org no longer exists. + # Update the repo files to reference vault.centos.org. 
+ sed -i s/mirror.centos.org/vault.centos.org/g /etc/yum.repos.d/*.repo + sed -i s/^#.*baseurl=http/baseurl=http/g /etc/yum.repos.d/*.repo + sed -i s/^mirrorlist=http/#mirrorlist=http/g /etc/yum.repos.d/*.repo +fi + + +# Detect package manager +if command -v apt-get >/dev/null 2>&1; then + PKG_MANAGER="apt" +elif command -v dnf >/dev/null 2>&1; then + PKG_MANAGER="dnf" +elif command -v yum >/dev/null 2>&1; then + PKG_MANAGER="yum" +elif command -v microdnf >/dev/null 2>&1; then + PKG_MANAGER="microdnf" +elif command -v tdnf >/dev/null 2>&1; then + PKG_MANAGER="tdnf" +else + echo "No supported package manager found. Supported: apt, dnf, yum, microdnf, tdnf" + exit 1 +fi + +echo "Detected package manager: $PKG_MANAGER" + +# Clean up based on package manager +clean_package_cache() { + case "$PKG_MANAGER" in + apt) + if [ "$(ls -1 /var/lib/apt/lists/ 2>/dev/null | wc -l)" -gt 0 ]; then + rm -rf /var/lib/apt/lists/* + fi + ;; + dnf|yum|microdnf) + if command -v dnf >/dev/null 2>&1; then + dnf clean all + elif command -v yum >/dev/null 2>&1; then + yum clean all + elif command -v microdnf >/dev/null 2>&1; then + microdnf clean all + fi + ;; + tdnf) + tdnf clean all + ;; + esac +} + +# Initial cleanup +clean_package_cache if [ "$(id -u)" -ne 0 ]; then echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' @@ -36,7 +106,7 @@ if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then USERNAME="" POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)") for CURRENT_USER in "${POSSIBLE_USERS[@]}"; do - if id -u ${CURRENT_USER} > /dev/null 2>&1; then + if id -u "${CURRENT_USER}" > /dev/null 2>&1; then USERNAME=${CURRENT_USER} break fi @@ -44,25 +114,10 @@ if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then if [ "${USERNAME}" = "" ]; then USERNAME=root fi -elif [ "${USERNAME}" = "none" ] || ! 
id -u ${USERNAME} > /dev/null 2>&1; then +elif [ "${USERNAME}" = "none" ] || ! id -u "${USERNAME}" > /dev/null 2>&1; then USERNAME=root fi -# Get central common setting -get_common_setting() { - if [ "${common_settings_file_loaded}" != "true" ]; then - curl -sfL "https://aka.ms/vscode-dev-containers/script-library/settings.env" 2>/dev/null -o /tmp/vsdc-settings.env || echo "Could not download settings file. Skipping." - common_settings_file_loaded=true - fi - if [ -f "/tmp/vsdc-settings.env" ]; then - local multi_line="" - if [ "$2" = "true" ]; then multi_line="-z"; fi - local result="$(grep ${multi_line} -oP "$1=\"?\K[^\"]+" /tmp/vsdc-settings.env | tr -d '\0')" - if [ ! -z "${result}" ]; then declare -g $1="${result}"; fi - fi - echo "$1=${!1}" -} - # Figure out correct version of a three part version number is not passed find_version_from_git_tags() { local variable_name=$1 @@ -71,7 +126,7 @@ find_version_from_git_tags() { local repository=$2 local prefix=${3:-"tags/v"} local separator=${4:-"."} - local last_part_optional=${5:-"false"} + local last_part_optional=${5:-"false"} if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then local escaped_separator=${separator//./\\.} local last_part @@ -81,7 +136,7 @@ find_version_from_git_tags() { last_part="${escaped_separator}[0-9]+" fi local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$" - local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)" + local version_list="$(git ls-remote --tags "${repository}" | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." 
| sort -rV)" if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)" else @@ -104,13 +159,13 @@ check_nightly_version_formatting() { local version_date=$(echo ${requested_version} | sed -e "s/^nightly-//") - date -d ${version_date} &>/dev/null - if [ $? != 0 ]; then + + if ! date -d "${version_date}" &>/dev/null; then echo -e "Invalid ${variable_name} value: ${requested_version}\nNightly version should be in the format nightly-YYYY-MM-DD" >&2 exit 1 fi - if [ $(date -d ${version_date} +%s) -ge $(date +%s) ]; then + if [ "$(date -d "${version_date}" +%s)" -ge "$(date +%s)" ]; then echo -e "Invalid ${variable_name} value: ${requested_version}\nNightly version should not exceed current date" >&2 exit 1 fi @@ -118,48 +173,174 @@ check_nightly_version_formatting() { updaterc() { if [ "${UPDATE_RC}" = "true" ]; then - echo "Updating /etc/bash.bashrc and /etc/zsh/zshrc..." - if [[ "$(cat /etc/bash.bashrc)" != *"$1"* ]]; then - echo -e "$1" >> /etc/bash.bashrc + echo "Updating shell configuration files..." + local bashrc_file="/etc/bash.bashrc" + + # Different distributions use different bashrc locations + if [ ! -f "$bashrc_file" ]; then + if [ -f "/etc/bashrc" ]; then + bashrc_file="/etc/bashrc" + elif [ -f "/etc/bash/bashrc" ]; then + bashrc_file="/etc/bash/bashrc" + fi + fi + + if [ -f "$bashrc_file" ] && [[ "$(cat "$bashrc_file")" != *"$1"* ]]; then + echo -e "$1" >> "$bashrc_file" fi + if [ -f "/etc/zsh/zshrc" ] && [[ "$(cat /etc/zsh/zshrc)" != *"$1"* ]]; then echo -e "$1" >> /etc/zsh/zshrc fi fi } -apt_get_update() -{ - if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then - echo "Running apt-get update..." - apt-get update -y - fi +# Package update functions +pkg_mgr_update() { + case "$PKG_MANAGER" in + apt) + if [ "$(find /var/lib/apt/lists/* 2>/dev/null | wc -l)" = "0" ]; then + echo "Running apt-get update..." 
+ apt-get update -y + fi + ;; + dnf) + dnf check-update || true + ;; + yum) + yum check-update || true + ;; + microdnf) + # microdnf doesn't have check-update + true + ;; + tdnf) + tdnf makecache || true + ;; + esac +} + +# Check if package is installed +is_package_installed() { + local package=$1 + case "$PKG_MANAGER" in + apt) + dpkg -s "$package" >/dev/null 2>&1 + ;; + dnf|yum|microdnf|tdnf) + rpm -q "$package" >/dev/null 2>&1 + ;; + esac } -# Checks if packages are installed and installs them if not +# Unified package checking and installation function check_packages() { - if ! dpkg -s "$@" >/dev/null 2>&1; then - apt_get_update - apt-get -y install --no-install-recommends "$@" + local packages=("$@") + local missing_packages=() + + # Check if curl-minimal is installed and swap it with curl + if is_package_installed "curl-minimal"; then + echo "curl-minimal is installed. Swapping it with curl..." + case "$PKG_MANAGER" in + dnf|yum|microdnf) + ${PKG_MANAGER} swap curl-minimal curl -y + ;; + tdnf) + tdnf remove -y curl-minimal + tdnf install -y curl + ;; + *) + echo "Package manager does not support swapping curl-minimal with curl. Please handle this manually." + ;; + esac + fi + + # Map package names based on distribution + for i in "${!packages[@]}"; do + case "$PKG_MANAGER" in + dnf|yum|microdnf|tdnf) + case "${packages[$i]}" in + "libc6-dev") packages[$i]="glibc-devel" ;; + "python3-minimal") packages[$i]="python3" ;; + "libpython3.*") packages[$i]="python3-devel" ;; + "gnupg2") packages[$i]="gnupg" ;; + esac + ;; + esac + done + + # Check which packages are missing + for package in "${packages[@]}"; do + if [ -n "$package" ] && ! 
is_package_installed "$package"; then + missing_packages+=("$package") + fi + done + + # Install missing packages + if [ ${#missing_packages[@]} -gt 0 ]; then + pkg_mgr_update + case "$PKG_MANAGER" in + apt) + apt-get -y install --no-install-recommends "${missing_packages[@]}" + ;; + dnf) + dnf install -y "${missing_packages[@]}" + ;; + yum) + yum install -y "${missing_packages[@]}" + ;; + microdnf) + microdnf install -y "${missing_packages[@]}" + ;; + tdnf) + tdnf install -y "${missing_packages[@]}" + ;; + esac fi } export DEBIAN_FRONTEND=noninteractive # Install curl, lldb, python3-minimal,libpython and rust dependencies if missing -if ! dpkg -s curl ca-certificates gnupg2 lldb python3-minimal gcc libc6-dev > /dev/null 2>&1; then - apt_get_update - apt-get -y install --no-install-recommends curl ca-certificates gcc libc6-dev - apt-get -y install lldb python3-minimal libpython3.? +echo "Installing required dependencies..." +check_packages curl ca-certificates gcc libc6-dev gnupg2 git + +# Install optional dependencies (continue if they fail) +case "$PKG_MANAGER" in + apt) + check_packages lldb python3-minimal libpython3.? || true + ;; + dnf|yum|microdnf) + check_packages lldb python3 python3-devel || true + ;; + tdnf) + check_packages python3 python3-devel || true + # LLDB might not be available in Photon/Mariner + ;; +esac + +# Get architecture +if command -v dpkg >/dev/null 2>&1; then + architecture="$(dpkg --print-architecture)" +else + architecture="$(uname -m)" + # Convert common architectures to Debian equivalents + case ${architecture} in + x86_64) + architecture="amd64" + ;; + aarch64) + architecture="arm64" + ;; + esac fi -architecture="$(dpkg --print-architecture)" download_architecture="${architecture}" case ${download_architecture} in - amd64) + amd64|x86_64) download_architecture="x86_64" ;; - arm64) + arm64|aarch64) download_architecture="aarch64" ;; *) echo "(!) Architecture ${architecture} not supported." 
@@ -169,7 +350,7 @@ esac # Install Rust umask 0002 -if ! cat /etc/group | grep -e "^rustlang:" > /dev/null 2>&1; then +if ! grep -e "^rustlang:" /etc/group > /dev/null 2>&1; then groupadd -r rustlang fi usermod -a -G rustlang "${USERNAME}" @@ -187,7 +368,7 @@ else fi is_nightly=0 - echo ${RUST_VERSION} | grep -q "nightly" || is_nightly=$? + echo "${RUST_VERSION}" | grep -q "nightly" || is_nightly=$? if [ $is_nightly = 0 ]; then check_nightly_version_formatting RUST_VERSION else @@ -201,9 +382,10 @@ else curl -sSL --proto '=https' --tlsv1.2 "https://static.rust-lang.org/rustup/dist/${download_architecture}-unknown-linux-gnu/rustup-init" -o /tmp/rustup/target/${download_architecture}-unknown-linux-gnu/release/rustup-init curl -sSL --proto '=https' --tlsv1.2 "https://static.rust-lang.org/rustup/dist/${download_architecture}-unknown-linux-gnu/rustup-init.sha256" -o /tmp/rustup/rustup-init.sha256 cd /tmp/rustup + cp /tmp/rustup/target/${download_architecture}-unknown-linux-gnu/release/rustup-init /tmp/rustup/rustup-init sha256sum -c rustup-init.sha256 chmod +x target/${download_architecture}-unknown-linux-gnu/release/rustup-init - target/${download_architecture}-unknown-linux-gnu/release/rustup-init -y --no-modify-path --profile ${RUSTUP_PROFILE} ${default_toolchain_arg} + target/${download_architecture}-unknown-linux-gnu/release/rustup-init -y --no-modify-path --profile "${RUSTUP_PROFILE}" ${default_toolchain_arg} cd ~ rm -rf /tmp/rustup fi @@ -213,8 +395,27 @@ if [ "${UPDATE_RUST}" = "true" ]; then echo "Updating Rust..." rustup update 2>&1 fi -echo "Installing common Rust dependencies..." -rustup component add rls rust-analysis rust-src rustfmt clippy 2>&1 +# Install Rust components +echo "Installing Rust components..." 
+for component in "${components[@]}"; do + # Trim leading and trailing whitespace + component="${component#"${component%%[![:space:]]*}"}" && component="${component%"${component##*[![:space:]]}"}" + if [ -n "${component}" ]; then + echo "Installing Rust component: ${component}" + if ! rustup component add "${component}" 2>&1; then + echo "Warning: Failed to install component '${component}'. It may not be available for this toolchain." >&2 + exit 1 + fi + fi +done + +if [ -n "${RUSTUP_TARGETS}" ]; then + IFS=',' read -ra targets <<< "${RUSTUP_TARGETS}" + for target in "${targets[@]}"; do + echo "Installing additional Rust target $target" + rustup target add "$target" 2>&1 + done +fi # Add CARGO_HOME, RUSTUP_HOME and bin directory into bashrc/zshrc files (unless disabled) updaterc "$(cat << EOF @@ -228,7 +429,6 @@ EOF chmod -R g+r+w "${RUSTUP_HOME}" "${CARGO_HOME}" # Clean up -rm -rf /var/lib/apt/lists/* +clean_package_cache echo "Done!" - diff --git a/src/sshd/README.md b/src/sshd/README.md index 7314e16af..f4215b391 100644 --- a/src/sshd/README.md +++ b/src/sshd/README.md @@ -15,6 +15,7 @@ Adds a SSH server into a container so that you can use an external terminal, sft | Options Id | Description | Type | Default Value | |-----|-----|-----|-----| +| gatewayPorts | Enable other hosts in the same network to connect to the forwarded ports | string | no | version | Currently unused. 
| string | latest | ## Usage diff --git a/src/sshd/devcontainer-feature.json b/src/sshd/devcontainer-feature.json index a5d464f69..46c4ef302 100644 --- a/src/sshd/devcontainer-feature.json +++ b/src/sshd/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "sshd", - "version": "1.0.9", + "version": "1.1.0", "name": "SSH server", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/sshd", "description": "Adds a SSH server into a container so that you can use an external terminal, sftp, or SSHFS to interact with it.", @@ -12,9 +12,30 @@ ], "default": "latest", "description": "Currently unused." + }, + "gatewayPorts": { + "type": "string", + "enum": [ + "no", + "yes", + "clientspecified" + ], + "default": "no", + "description": "Enable other hosts in the same network to connect to the forwarded ports" } }, "entrypoint": "/usr/local/share/ssh-init.sh", + "customizations": { + "vscode": { + "settings": { + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes an SSH server so that you can use an external terminal, sftp, or SSHFS to interact with it. The first time you've started the container, you will want to set a password for your user. With each connection to the container, you'll want to forward the SSH port to your local machine and use a local terminal or other tool to connect using the password you set." 
+ } + ] + } + } + }, "installsAfter": [ "ghcr.io/devcontainers/features/common-utils" ] diff --git a/src/sshd/install.sh b/src/sshd/install.sh index 146040896..9b9ddedf2 100755 --- a/src/sshd/install.sh +++ b/src/sshd/install.sh @@ -13,6 +13,7 @@ SSHD_PORT="${SSHD_PORT:-"2222"}" USERNAME="${USERNAME:-"${_REMOTE_USER:-"automatic"}"}" START_SSHD="${START_SSHD:-"false"}" NEW_PASSWORD="${NEW_PASSWORD:-"skip"}" +GATEWAY_PORTS="${GATEWAYPORTS:-"no"}" set -e @@ -89,6 +90,7 @@ mkdir -p /var/run/sshd sed -i 's/session\s*required\s*pam_loginuid\.so/session optional pam_loginuid.so/g' /etc/pam.d/sshd sed -i 's/#*PermitRootLogin prohibit-password/PermitRootLogin yes/g' /etc/ssh/sshd_config sed -i -E "s/#*\s*Port\s+.+/Port ${SSHD_PORT}/g" /etc/ssh/sshd_config +sed -i "s/#GatewayPorts no/GatewayPorts ${GATEWAY_PORTS}/g" /etc/ssh/sshd_config # Need to UsePAM so /etc/environment is processed sed -i -E "s/#?\s*UsePAM\s+.+/UsePAM yes/g" /etc/ssh/sshd_config diff --git a/src/terraform/NOTES.md b/src/terraform/NOTES.md index 19fe92f31..74a90639e 100644 --- a/src/terraform/NOTES.md +++ b/src/terraform/NOTES.md @@ -1,5 +1,40 @@ +## Licensing + +On August 10, 2023, HashiCorp announced a change of license for its products, including Terraform. After ~9 years of Terraform being open source under the MPL v2 license, it was to move under a non-open source BSL v1.1 license, starting from the next (1.6) version. See https://github.com/hashicorp/terraform/blob/main/LICENSE + +## Custom Download Server + +The `customDownloadServer` option allows you to specify an alternative server for downloading Terraform and Sentinel packages. This is useful for organizations that maintain internal mirrors or have proxies for HashiCorp downloads. 
+ +When using this option: +- Provide the complete URL including protocol (e.g., `https://my-mirror.example.com`) +- The server should mirror the HashiCorp releases structure + +Example: +```json +"features": { + "ghcr.io/devcontainers/features/terraform:1": { + "customDownloadServer": "https://my-mirror.example.com" + } +} +``` + +### ⚠️ Security Considerations + +When using a custom download server, be aware of the following security implications: + +- **Server Verification**: Always verify that the custom server is trustworthy and maintained by your organization or a trusted entity. Using an untrusted or compromised server could lead to downloading malicious software. + +- **Supply Chain Risks**: Malicious actors may attempt to distribute compromised versions of Terraform that contain backdoors, cryptominers, or other harmful code. + +- **Integrity Checks**: The feature performs SHA256 checks when available, but these are only as trustworthy as the source of the checksums. If both the binaries and checksums come from a compromised server, the integrity check may pass despite the software being malicious. + +- **Organizational Policy**: Ensure your custom download server adheres to your organization's security policies and implements proper access controls. + +Always use the official HashiCorp download server (https://releases.hashicorp.com) unless you have a specific need for an alternative source. + ## OS Support This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. diff --git a/src/terraform/README.md b/src/terraform/README.md index 2201c8db8..4b37b4260 100644 --- a/src/terraform/README.md +++ b/src/terraform/README.md @@ -22,16 +22,51 @@ Installs the Terraform CLI and optionally TFLint and Terragrunt. 
Auto-detects la | installTFsec | Install tfsec, a tool to spot potential misconfigurations for your terraform code | boolean | false | | installTerraformDocs | Install terraform-docs, a utility to generate documentation from Terraform modules | boolean | false | | httpProxy | Connect to a keyserver using a proxy by configuring this option | string | - | +| customDownloadServer | Custom server URL for downloading Terraform and Sentinel packages, including protocol (e.g., https://releases.hashicorp.com). If not provided, the default HashiCorp download server (https://releases.hashicorp.com) will be used. | string | - | ## Customizations ### VS Code Extensions - `HashiCorp.terraform` -- `ms-azuretools.vscode-azureterraform` +## Licensing + +On August 10, 2023, HashiCorp announced a change of license for its products, including Terraform. After ~9 years of Terraform being open source under the MPL v2 license, it was to move under a non-open source BSL v1.1 license, starting from the next (1.6) version. See https://github.com/hashicorp/terraform/blob/main/LICENSE + +## Custom Download Server + +The `customDownloadServer` option allows you to specify an alternative server for downloading Terraform and Sentinel packages. This is useful for organizations that maintain internal mirrors or have proxies for HashiCorp downloads. + +When using this option: +- Provide the complete URL including protocol (e.g., `https://my-mirror.example.com`) +- The server should mirror the HashiCorp releases structure + +Example: +```json +"features": { + "ghcr.io/devcontainers/features/terraform:1": { + "customDownloadServer": "https://my-mirror.example.com" + } +} +``` + +### ⚠️ Security Considerations + +When using a custom download server, be aware of the following security implications: + +- **Server Verification**: Always verify that the custom server is trustworthy and maintained by your organization or a trusted entity. 
Using an untrusted or compromised server could lead to downloading malicious software. + +- **Supply Chain Risks**: Malicious actors may attempt to distribute compromised versions of Terraform that contain backdoors, cryptominers, or other harmful code. + +- **Integrity Checks**: The feature performs SHA256 checks when available, but these are only as trustworthy as the source of the checksums. If both the binaries and checksums come from a compromised server, the integrity check may pass despite the software being malicious. + +- **Organizational Policy**: Ensure your custom download server adheres to your organization's security policies and implements proper access controls. + +Always use the official HashiCorp download server (https://releases.hashicorp.com) unless you have a specific need for an alternative source. + ## OS Support This Feature should work on recent versions of Debian/Ubuntu-based distributions with the `apt` package manager installed. diff --git a/src/terraform/devcontainer-feature.json b/src/terraform/devcontainer-feature.json index 119b4335e..a72f18993 100644 --- a/src/terraform/devcontainer-feature.json +++ b/src/terraform/devcontainer-feature.json @@ -1,6 +1,6 @@ { "id": "terraform", - "version": "1.3.4", + "version": "1.4.2", "name": "Terraform, tflint, and TFGrunt", "documentationURL": "https://github.com/devcontainers/features/tree/main/src/terraform", "description": "Installs the Terraform CLI and optionally TFLint and Terragrunt. Auto-detects latest version and installs needed dependencies.", @@ -54,20 +54,28 @@ "type": "string", "default": "", "description": "Connect to a keyserver using a proxy by configuring this option" + }, + "customDownloadServer": { + "type": "string", + "default": "", + "description": "Custom server URL for downloading Terraform and Sentinel packages, including protocol (e.g., https://releases.hashicorp.com). If not provided, the default HashiCorp download server (https://releases.hashicorp.com) will be used." 
} }, "customizations": { "vscode": { "extensions": [ - "HashiCorp.terraform", - "ms-azuretools.vscode-azureterraform" + "HashiCorp.terraform" ], "settings": { "terraform.languageServer.enable": true, "terraform.languageServer.args": [ "serve" ], - "azureTerraform.terminal": "integrated" + "github.copilot.chat.codeGeneration.instructions": [ + { + "text": "This dev container includes the Terraform CLI and optionally TFLint and Terragrunt pre-installed and available on the `PATH`, along with the Terraform extension for Terraform development." + } + ] } } }, diff --git a/src/terraform/install.sh b/src/terraform/install.sh index 1156f6d41..999815a38 100755 --- a/src/terraform/install.sh +++ b/src/terraform/install.sh @@ -18,6 +18,9 @@ TERRAGRUNT_VERSION="${TERRAGRUNT:-"latest"}" INSTALL_SENTINEL=${INSTALLSENTINEL:-false} INSTALL_TFSEC=${INSTALLTFSEC:-false} INSTALL_TERRAFORM_DOCS=${INSTALLTERRAFORMDOCS:-false} +CUSTOM_DOWNLOAD_SERVER="${CUSTOMDOWNLOADSERVER:-""}" +# This is because ubuntu noble and debian trixie don't support the old format of GPG keys and validation +NEW_GPG_CODENAMES="trixie noble" TERRAFORM_SHA256="${TERRAFORM_SHA256:-"automatic"}" TFLINT_SHA256="${TFLINT_SHA256:-"automatic"}" @@ -26,11 +29,13 @@ SENTINEL_SHA256="${SENTINEL_SHA256:-"automatic"}" TFSEC_SHA256="${TFSEC_SHA256:-"automatic"}" TERRAFORM_DOCS_SHA256="${TERRAFORM_DOCS_SHA256:-"automatic"}" +HASHICORP_RELEASES_URL="https://releases.hashicorp.com" +if [ -n "${CUSTOM_DOWNLOAD_SERVER}" ]; then + HASHICORP_RELEASES_URL="${CUSTOM_DOWNLOAD_SERVER}" +fi + TERRAFORM_GPG_KEY="72D7468F" TFLINT_GPG_KEY_URI="https://raw.githubusercontent.com/terraform-linters/tflint/v0.46.1/8CE69160EB3F2FE9.key" -GPG_KEY_SERVERS="keyserver hkps://keyserver.ubuntu.com -keyserver hkps://keys.openpgp.org -keyserver hkps://keyserver.pgp.com" KEYSERVER_PROXY="${HTTPPROXY:-"${HTTP_PROXY:-""}"}" architecture="$(uname -m)" @@ -47,6 +52,44 @@ if [ "$(id -u)" -ne 0 ]; then exit 1 fi +# Detect Ubuntu Noble or Debian Trixie and 
use new repo setup, else use legacy GPG logic +IS_GPG_NEW=0 +. /etc/os-release +if [[ "${NEW_GPG_CODENAMES}" == *"${VERSION_CODENAME}"* ]]; then + IS_GPG_NEW=1 +fi + +# Get the list of GPG key servers that are reachable +get_gpg_key_servers() { + declare -A keyservers_curl_map=( + ["hkps://keyserver.ubuntu.com"]="https://keyserver.ubuntu.com" + ["hkps://keys.openpgp.org"]="https://keys.openpgp.org" + ["hkps://keyserver.pgp.com"]="https://keyserver.pgp.com" + ) + + local curl_args="" + local keyserver_reachable=false # Flag to indicate if any keyserver is reachable + + if [ ! -z "${KEYSERVER_PROXY}" ]; then + curl_args="--proxy ${KEYSERVER_PROXY}" + fi + + for keyserver in "${!keyservers_curl_map[@]}"; do + local keyserver_curl_url="${keyservers_curl_map[${keyserver}]}" + if curl -s ${curl_args} --max-time 5 ${keyserver_curl_url} > /dev/null; then + echo "keyserver ${keyserver}" + keyserver_reachable=true + else + echo "(*) Keyserver ${keyserver} is not reachable." >&2 + fi + done + + if ! $keyserver_reachable; then + echo "(!) No keyserver is reachable." >&2 + exit 1 + fi +} + # Import the specified key in a variable name passed in as receive_gpg_keys() { local keys=${!1} @@ -55,14 +98,34 @@ receive_gpg_keys() { keyring_args="--no-default-keyring --keyring $2" fi if [ ! -z "${KEYSERVER_PROXY}" ]; then - keyring_args="${keyring_args} --keyserver-options http-proxy=${KEYSERVER_PROXY}" + keyring_args="${keyring_args} --keyserver-options http-proxy=${KEYSERVER_PROXY}" + fi + + # Install curl + if ! 
type curl > /dev/null 2>&1; then + check_packages curl fi # Use a temporary location for gpg keys to avoid polluting image export GNUPGHOME="/tmp/tmp-gnupg" mkdir -p ${GNUPGHOME} chmod 700 ${GNUPGHOME} - echo -e "disable-ipv6\n${GPG_KEY_SERVERS}" > ${GNUPGHOME}/dirmngr.conf + + # Special handling for HashiCorp GPG key on Ubuntu Noble + if [ "$IS_GPG_NEW" -eq 1 ] && [ "$keys" = "$TERRAFORM_GPG_KEY" ]; then + echo "(*) Ubuntu Noble detected, using Keybase for HashiCorp GPG key import...." + curl -fsSL https://keybase.io/hashicorp/pgp_keys.asc | gpg --import + if ! gpg --list-keys "${TERRAFORM_GPG_KEY}" > /dev/null 2>&1; then + gpg --list-keys + echo "(*) Warning: HashiCorp GPG key not found in keyring after import." + echo " Continuing installation without GPG verification on Ubuntu Noble." + echo " This is expected behavior for Ubuntu Noble due to keyserver issues." + return 1 # Return failure to indicate GPG verification should be skipped + fi + return 0 + fi + + echo -e "disable-ipv6\n$(get_gpg_key_servers)" > ${GNUPGHOME}/dirmngr.conf # GPG key download sometimes fails for some reason and retrying fixes it. local retry_count=0 local gpg_ok="false" @@ -72,11 +135,30 @@ receive_gpg_keys() { echo "(*) Downloading GPG key..." ( echo "${keys}" | xargs -n 1 gpg -q ${keyring_args} --recv-keys) 2>&1 && gpg_ok="true" if [ "${gpg_ok}" != "true" ]; then - echo "(*) Failed getting key, retring in 10s..." + echo "(*) Failed getting key, retrying in 10s..." (( retry_count++ )) sleep 10s fi done + + # If all attempts fail, try getting the keyserver IP address and explicitly passing it to gpg + if [ "${gpg_ok}" = "false" ]; then + retry_count=0; + echo "(*) Resolving GPG keyserver IP address..." + local keyserver_ip_address=$( dig +short keyserver.ubuntu.com | head -n1 ) + echo "(*) GPG keyserver IP address $keyserver_ip_address" + + until [ "${gpg_ok}" = "true" ] || [ "${retry_count}" -eq "3" ]; + do + echo "(*) Downloading GPG key..." 
+ ( echo "${keys}" | xargs -n 1 gpg -q ${keyring_args} --recv-keys --keyserver ${keyserver_ip_address}) 2>&1 && gpg_ok="true" + if [ "${gpg_ok}" != "true" ]; then + echo "(*) Failed getting key, retrying in 10s..." + (( retry_count++ )) + sleep 10s + fi + done + fi set -e if [ "${gpg_ok}" = "false" ]; then echo "(!) Failed to get gpg key." @@ -118,6 +200,47 @@ find_version_from_git_tags() { echo "${variable_name}=${!variable_name}" } +# Use semver logic to decrement a version number then look for the closest match +find_prev_version_from_git_tags() { + local variable_name=$1 + local current_version=${!variable_name} + local repository=$2 + # Normally a "v" is used before the version number, but support alternate cases + local prefix=${3:-"tags/v"} + # Some repositories use "_" instead of "." for version number part separation, support that + local separator=${4:-"."} + # Some tools release versions that omit the last digit (e.g. go) + local last_part_optional=${5:-"false"} + # Some repositories may have tags that include a suffix (e.g. actions/node-versions) + local version_suffix_regex=$6 + # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios. 
+ set +e + major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')" + minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')" + breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')" + + if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then + ((major=major-1)) + declare -g ${variable_name}="${major}" + # Look for latest version from previous major release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + # Handle situations like Go's odd version pattern where "0" releases omit the last part + elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then + ((minor=minor-1)) + declare -g ${variable_name}="${major}.${minor}" + # Look for latest version from previous minor release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + else + ((breakfix=breakfix-1)) + if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then + declare -g ${variable_name}="${major}.${minor}" + else + declare -g ${variable_name}="${major}.${minor}.${breakfix}" + fi + fi + set -e +} + find_sentinel_version_from_url() { local variable_name=$1 local requested_version=${!variable_name} @@ -158,6 +281,73 @@ check_packages() { fi } +# Function to fetch the version released prior to the latest version +get_previous_version() { + local url=$1 + local repo_url=$2 + local variable_name=$3 + prev_version=${!variable_name} + + output=$(curl -s "$repo_url"); + + # install jq + check_packages jq + + message=$(echo "$output" | jq -r '.message') + + if [[ $message == "API rate limit exceeded"* ]]; then + echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}" + echo -e "\nAttempting to find latest version using GitHub tags." 
+ find_prev_version_from_git_tags prev_version "$url" "tags/v" + declare -g ${variable_name}="${prev_version}" + else + echo -e "\nAttempting to find latest version using GitHub Api." + version=$(echo "$output" | jq -r '.tag_name') + declare -g ${variable_name}="${version#v}" + fi + echo "${variable_name}=${!variable_name}" +} + +get_github_api_repo_url() { + local url=$1 + echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases/latest" +} + +install_previous_version() { + given_version=$1 + requested_version=${!given_version} + local URL=$2 + INSTALLER_FN=$3 + local REPO_URL=$(get_github_api_repo_url "$URL") + local PKG_NAME=$(get_pkg_name "${given_version}") + echo -e "\n(!) Failed to fetch the latest artifacts for ${PKG_NAME} v${requested_version}..." + get_previous_version "$URL" "$REPO_URL" requested_version + echo -e "\nAttempting to install ${requested_version}" + declare -g ${given_version}="${requested_version#v}" + $INSTALLER_FN "${!given_version}" + echo "${given_version}=${!given_version}" +} + +install_cosign() { + COSIGN_VERSION=$1 + local URL=$2 + cosign_filename="/tmp/cosign_${COSIGN_VERSION}_${architecture}.deb" + cosign_url="https://github.com/sigstore/cosign/releases/latest/download/cosign_${COSIGN_VERSION}_${architecture}.deb" + curl -L "${cosign_url}" -o $cosign_filename + if grep -q "Not Found" "$cosign_filename"; then + echo -e "\n(!) Failed to fetch the latest artifacts for cosign v${COSIGN_VERSION}..." + REPO_URL=$(get_github_api_repo_url "$URL") + get_previous_version "$URL" "$REPO_URL" COSIGN_VERSION + echo -e "\nAttempting to install ${COSIGN_VERSION}" + cosign_filename="/tmp/cosign_${COSIGN_VERSION}_${architecture}.deb" + cosign_url="https://github.com/sigstore/cosign/releases/latest/download/cosign_${COSIGN_VERSION}_${architecture}.deb" + curl -L "${cosign_url}" -o $cosign_filename + fi + dpkg -i $cosign_filename + rm $cosign_filename + echo "Installation of cosign succeeded with ${COSIGN_VERSION}." 
+} + # Install 'cosign' for validating signatures # https://docs.sigstore.dev/cosign/overview/ ensure_cosign() { @@ -165,12 +355,10 @@ ensure_cosign() { if ! type cosign > /dev/null 2>&1; then echo "Installing cosign..." - LATEST_COSIGN_VERSION="latest" - find_version_from_git_tags LATEST_COSIGN_VERSION 'https://github.com/sigstore/cosign' - curl -L "https://github.com/sigstore/cosign/releases/latest/download/cosign_${LATEST_COSIGN_VERSION}_${architecture}.deb" -o /tmp/cosign_${LATEST_COSIGN_VERSION}_${architecture}.deb - - dpkg -i /tmp/cosign_${LATEST_COSIGN_VERSION}_${architecture}.deb - rm /tmp/cosign_${LATEST_COSIGN_VERSION}_${architecture}.deb + COSIGN_VERSION="latest" + cosign_url='https://github.com/sigstore/cosign' + find_version_from_git_tags COSIGN_VERSION "${cosign_url}" + install_cosign "${COSIGN_VERSION}" "${cosign_url}" fi if ! type cosign > /dev/null 2>&1; then echo "(!) Failed to install cosign." @@ -183,29 +371,82 @@ ensure_cosign() { export DEBIAN_FRONTEND=noninteractive # Install dependencies if missing -check_packages curl ca-certificates gnupg2 dirmngr coreutils unzip +check_packages curl ca-certificates gnupg2 dirmngr coreutils unzip dnsutils if ! 
type git > /dev/null 2>&1; then check_packages git fi +terraform_url='https://github.com/hashicorp/terraform' +tflint_url='https://github.com/terraform-linters/tflint' +terragrunt_url='https://github.com/gruntwork-io/terragrunt' # Verify requested version is available, convert latest -find_version_from_git_tags TERRAFORM_VERSION 'https://github.com/hashicorp/terraform' -find_version_from_git_tags TFLINT_VERSION 'https://github.com/terraform-linters/tflint' -find_version_from_git_tags TERRAGRUNT_VERSION 'https://github.com/gruntwork-io/terragrunt' +find_version_from_git_tags TERRAFORM_VERSION "$terraform_url" +find_version_from_git_tags TFLINT_VERSION "$tflint_url" +find_version_from_git_tags TERRAGRUNT_VERSION "$terragrunt_url" + +install_terraform() { + local TERRAFORM_VERSION=$1 + terraform_filename="terraform_${TERRAFORM_VERSION}_linux_${architecture}.zip" + curl -sSL -o ${terraform_filename} "${HASHICORP_RELEASES_URL}/terraform/${TERRAFORM_VERSION}/${terraform_filename}" +} + +verify_signature() { + local gpg_key=$1 + local sha256sums_url=$2 + local sig_url=$3 + local sha256sums_file=$4 + local sig_file=$5 + local verify_result=0 + + receive_gpg_keys "$gpg_key" + verify_result=$? + if [ $verify_result -ne 0 ] && [ "$IS_GPG_NEW" -eq 1 ]; then + echo "Skipping the gpg key validation for ubuntu noble as unable to import the key." + return 1 + fi + curl -sSL -o "$sha256sums_file" "$sha256sums_url" + curl -sSL -o "$sig_file" "$sig_url" + + # Try GPG verification, but don't fail on Noble + gpg --verify "$sig_file" "$sha256sums_file" + verify_result=$? + if [ $verify_result -ne 0 ]; then + echo "(!) GPG verification failed." + exit 1 + fi +} mkdir -p /tmp/tf-downloads cd /tmp/tf-downloads - # Install Terraform, tflint, Terragrunt echo "Downloading terraform..." 
terraform_filename="terraform_${TERRAFORM_VERSION}_linux_${architecture}.zip" -curl -sSL -o ${terraform_filename} "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/${terraform_filename}" +install_terraform "$TERRAFORM_VERSION" +if grep -q "The specified key does not exist." "${terraform_filename}"; then + install_previous_version TERRAFORM_VERSION $terraform_url "install_terraform" + terraform_filename="terraform_${TERRAFORM_VERSION}_linux_${architecture}.zip" +fi if [ "${TERRAFORM_SHA256}" != "dev-mode" ]; then if [ "${TERRAFORM_SHA256}" = "automatic" ]; then - receive_gpg_keys TERRAFORM_GPG_KEY - curl -sSL -o terraform_SHA256SUMS https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_SHA256SUMS - curl -sSL -o terraform_SHA256SUMS.sig https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_SHA256SUMS.${TERRAFORM_GPG_KEY}.sig - gpg --verify terraform_SHA256SUMS.sig terraform_SHA256SUMS + # For Ubuntu Noble, try GPG verification but continue if it fails + if [ "$IS_GPG_NEW" -eq 1 ]; then + echo "(*) Ubuntu Noble detected - attempting GPG verification with fallback..." + set +e + sha256sums_url="${HASHICORP_RELEASES_URL}/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_SHA256SUMS" + sig_url="${HASHICORP_RELEASES_URL}/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_SHA256SUMS.${TERRAFORM_GPG_KEY}.sig" + verify_signature TERRAFORM_GPG_KEY "$sha256sums_url" "$sig_url" "terraform_SHA256SUMS" "terraform_SHA256SUMS.sig" + verify_result=$? + set -e + if [ $verify_result -ne 0 ]; then + echo "(*) GPG verification failed on Ubuntu Noble, but continuing installation." + echo " Downloading checksums for basic integrity check..." 
+ curl -sSL -o terraform_SHA256SUMS "${HASHICORP_RELEASES_URL}/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_SHA256SUMS" + fi + else + sha256sums_url="${HASHICORP_RELEASES_URL}/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_SHA256SUMS" + sig_url="${HASHICORP_RELEASES_URL}/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_SHA256SUMS.${TERRAFORM_GPG_KEY}.sig" + verify_signature TERRAFORM_GPG_KEY "$sha256sums_url" "$sig_url" "terraform_SHA256SUMS" "terraform_SHA256SUMS.sig" + fi else echo "${TERRAFORM_SHA256} *${terraform_filename}" > terraform_SHA256SUMS fi @@ -214,10 +455,18 @@ fi unzip ${terraform_filename} mv -f terraform /usr/local/bin/ +install_tflint() { + TFLINT_VERSION=$1 + curl -sSL -o /tmp/tf-downloads/${TFLINT_FILENAME} https://github.com/terraform-linters/tflint/releases/download/v${TFLINT_VERSION}/${TFLINT_FILENAME} +} + if [ "${TFLINT_VERSION}" != "none" ]; then echo "Downloading tflint..." TFLINT_FILENAME="tflint_linux_${architecture}.zip" - curl -sSL -o /tmp/tf-downloads/${TFLINT_FILENAME} https://github.com/terraform-linters/tflint/releases/download/v${TFLINT_VERSION}/${TFLINT_FILENAME} + install_tflint "$TFLINT_VERSION" + if grep -q "Not Found" "/tmp/tf-downloads/${TFLINT_FILENAME}"; then + install_previous_version TFLINT_VERSION "$tflint_url" "install_tflint" + fi if [ "${TFLINT_SHA256}" != "dev-mode" ]; then if [ "${TFLINT_SHA256}" != "automatic" ]; then @@ -258,10 +507,20 @@ if [ "${TFLINT_VERSION}" != "none" ]; then unzip /tmp/tf-downloads/${TFLINT_FILENAME} mv -f tflint /usr/local/bin/ fi + +install_terragrunt() { + TERRAGRUNT_VERSION=$1 + curl -sSL -o /tmp/tf-downloads/${terragrunt_filename} https://github.com/gruntwork-io/terragrunt/releases/download/v${TERRAGRUNT_VERSION}/${terragrunt_filename} +} + if [ "${TERRAGRUNT_VERSION}" != "none" ]; then echo "Downloading Terragrunt..." 
terragrunt_filename="terragrunt_linux_${architecture}" - curl -sSL -o /tmp/tf-downloads/${terragrunt_filename} https://github.com/gruntwork-io/terragrunt/releases/download/v${TERRAGRUNT_VERSION}/${terragrunt_filename} + install_terragrunt "$TERRAGRUNT_VERSION" + output=$(cat "/tmp/tf-downloads/${terragrunt_filename}") + if [[ $output == "Not Found" ]]; then + install_previous_version TERRAGRUNT_VERSION $terragrunt_url "install_terragrunt" + fi if [ "${TERRAGRUNT_SHA256}" != "dev-mode" ]; then if [ "${TERRAGRUNT_SHA256}" = "automatic" ]; then curl -sSL -o terragrunt_SHA256SUMS https://github.com/gruntwork-io/terragrunt/releases/download/v${TERRAGRUNT_VERSION}/SHA256SUMS @@ -276,19 +535,34 @@ fi if [ "${INSTALL_SENTINEL}" = "true" ]; then SENTINEL_VERSION="latest" - sentinel_releases_url='https://releases.hashicorp.com/sentinel' + sentinel_releases_url="${HASHICORP_RELEASES_URL}/sentinel" find_sentinel_version_from_url SENTINEL_VERSION ${sentinel_releases_url} sentinel_filename="sentinel_${SENTINEL_VERSION}_linux_${architecture}.zip" echo "(*) Downloading Sentinel... ${sentinel_filename}" curl -sSL -o /tmp/tf-downloads/${sentinel_filename} ${sentinel_releases_url}/${SENTINEL_VERSION}/${sentinel_filename} if [ "${SENTINEL_SHA256}" != "dev-mode" ]; then if [ "${SENTINEL_SHA256}" = "automatic" ]; then - receive_gpg_keys TERRAFORM_GPG_KEY - curl -sSL -o sentinel_checksums.txt ${sentinel_releases_url}/${SENTINEL_VERSION}/sentinel_${SENTINEL_VERSION}_SHA256SUMS - curl -sSL -o sentinel_checksums.txt.sig ${sentinel_releases_url}/${SENTINEL_VERSION}/sentinel_${SENTINEL_VERSION}_SHA256SUMS.${TERRAFORM_GPG_KEY}.sig - gpg --verify sentinel_checksums.txt.sig sentinel_checksums.txt + # For Ubuntu Noble, try GPG verification but continue if it fails + if [ "$IS_GPG_NEW" -eq 1 ]; then + echo "(*) Ubuntu Noble detected - attempting Sentinel GPG verification with fallback..." 
+ set +e + sha256sums_url="${sentinel_releases_url}/${SENTINEL_VERSION}/sentinel_${SENTINEL_VERSION}_SHA256SUMS" + sig_url="${sentinel_releases_url}/${SENTINEL_VERSION}/sentinel_${SENTINEL_VERSION}_SHA256SUMS.${TERRAFORM_GPG_KEY}.sig" + verify_signature TERRAFORM_GPG_KEY "$sha256sums_url" "$sig_url" "sentinel_checksums.txt" "sentinel_checksums.txt.sig" + verify_result=$? + set -e + if [ $verify_result -ne 0 ]; then + echo "(*) GPG verification failed on Ubuntu Noble, but continuing installation." + echo " Downloading checksums for basic integrity check..." + curl -sSL -o sentinel_checksums.txt "${sentinel_releases_url}/${SENTINEL_VERSION}/sentinel_${SENTINEL_VERSION}_SHA256SUMS" + fi + else + sha256sums_url="${sentinel_releases_url}/${SENTINEL_VERSION}/sentinel_${SENTINEL_VERSION}_SHA256SUMS" + sig_url="${sentinel_releases_url}/${SENTINEL_VERSION}/sentinel_${SENTINEL_VERSION}_SHA256SUMS.${TERRAFORM_GPG_KEY}.sig" + verify_signature TERRAFORM_GPG_KEY "$sha256sums_url" "$sig_url" "sentinel_checksums.txt" "sentinel_checksums.txt.sig" + fi # Verify the SHASUM matches the archive - shasum -a 256 --ignore-missing -c sentinel_checksums.txt + shasum -a 256 --ignore-missing -c sentinel_checksums.txt else echo "${SENTINEL_SHA256} *${SENTINEL_FILENAME}" >sentinel_checksums.txt fi @@ -299,12 +573,23 @@ if [ "${INSTALL_SENTINEL}" = "true" ]; then mv -f /tmp/tf-downloads/sentinel /usr/local/bin/sentinel fi +install_tfsec() { + local TFSEC_VERSION=$1 + tfsec_filename="tfsec_${TFSEC_VERSION}_linux_${architecture}.tar.gz" + curl -sSL -o /tmp/tf-downloads/${tfsec_filename} https://github.com/aquasecurity/tfsec/releases/download/v${TFSEC_VERSION}/${tfsec_filename} +} + if [ "${INSTALL_TFSEC}" = "true" ]; then TFSEC_VERSION="latest" - find_version_from_git_tags TFSEC_VERSION 'https://github.com/aquasecurity/tfsec' + tfsec_url='https://github.com/aquasecurity/tfsec' + find_version_from_git_tags TFSEC_VERSION $tfsec_url tfsec_filename="tfsec_${TFSEC_VERSION}_linux_${architecture}.tar.gz" 
echo "(*) Downloading TFSec... ${tfsec_filename}" - curl -sSL -o /tmp/tf-downloads/${tfsec_filename} https://github.com/aquasecurity/tfsec/releases/download/v${TFSEC_VERSION}/${tfsec_filename} + install_tfsec "$TFSEC_VERSION" + if grep -q "Not Found" "/tmp/tf-downloads/${tfsec_filename}"; then + install_previous_version TFSEC_VERSION $tfsec_url "install_tfsec" + tfsec_filename="tfsec_${TFSEC_VERSION}_linux_${architecture}.tar.gz" + fi if [ "${TFSEC_SHA256}" != "dev-mode" ]; then if [ "${TFSEC_SHA256}" = "automatic" ]; then curl -sSL -o tfsec_SHA256SUMS https://github.com/aquasecurity/tfsec/releases/download/v${TFSEC_VERSION}/tfsec_${TFSEC_VERSION}_checksums.txt @@ -319,12 +604,23 @@ if [ "${INSTALL_TFSEC}" = "true" ]; then mv -f /tmp/tf-downloads/tfsec/tfsec /usr/local/bin/tfsec fi +install_terraform_docs() { + local TERRAFORM_DOCS_VERSION=$1 + tfdocs_filename="terraform-docs-v${TERRAFORM_DOCS_VERSION}-linux-${architecture}.tar.gz" + curl -sSL -o /tmp/tf-downloads/${tfdocs_filename} https://github.com/terraform-docs/terraform-docs/releases/download/v${TERRAFORM_DOCS_VERSION}/${tfdocs_filename} +} + if [ "${INSTALL_TERRAFORM_DOCS}" = "true" ]; then TERRAFORM_DOCS_VERSION="latest" - find_version_from_git_tags TERRAFORM_DOCS_VERSION 'https://github.com/terraform-docs/terraform-docs' + terraform_docs_url='https://github.com/terraform-docs/terraform-docs' + find_version_from_git_tags TERRAFORM_DOCS_VERSION $terraform_docs_url tfdocs_filename="terraform-docs-v${TERRAFORM_DOCS_VERSION}-linux-${architecture}.tar.gz" echo "(*) Downloading Terraform docs... 
${tfdocs_filename}" - curl -sSL -o /tmp/tf-downloads/${tfdocs_filename} https://github.com/terraform-docs/terraform-docs/releases/download/v${TERRAFORM_DOCS_VERSION}/${tfdocs_filename} + install_terraform_docs "$TERRAFORM_DOCS_VERSION" + if grep -q "Not Found" "/tmp/tf-downloads/${tfdocs_filename}"; then + install_previous_version TERRAFORM_DOCS_VERSION $terraform_docs_url "install_terraform_docs" + tfdocs_filename="terraform-docs-v${TERRAFORM_DOCS_VERSION}-linux-${architecture}.tar.gz" + fi if [ "${TERRAFORM_DOCS_SHA256}" != "dev-mode" ]; then if [ "${TERRAFORM_DOCS_SHA256}" = "automatic" ]; then curl -sSL -o tfdocs_SHA256SUMS https://github.com/terraform-docs/terraform-docs/releases/download/v${TERRAFORM_DOCS_VERSION}/terraform-docs-v${TERRAFORM_DOCS_VERSION}.sha256sum diff --git a/test/_global/scenarios.json b/test/_global/scenarios.json index 2228c7d6e..184f8e87b 100644 --- a/test/_global/scenarios.json +++ b/test/_global/scenarios.json @@ -1,6 +1,6 @@ { "all_the_clis": { - "image": "ubuntu:focal", + "image": "ubuntu:noble", "features": { "aws-cli": {}, "azure-cli": {}, @@ -8,7 +8,7 @@ } }, "node_java_rust": { - "image": "ubuntu:focal", + "image": "ubuntu:noble", "features": { "node": {}, "java": {}, diff --git a/test/anaconda/install_anaconda_almalinux8.sh b/test/anaconda/install_anaconda_almalinux8.sh new file mode 100644 index 000000000..416d32a2b --- /dev/null +++ b/test/anaconda/install_anaconda_almalinux8.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +check "conda" conda --version +check "python" python --version +check "pylint" pylint --version +check "flake8" flake8 --version +check "autopep8" autopep8 --version +check "yapf" yapf --version +check "pydocstyle" pydocstyle --version +check "pycodestyle" pycodestyle --version +check "if conda-notice.txt exists" cat /usr/local/etc/vscode-dev-containers/conda-notice.txt + +check "certifi" pip show certifi | 
grep Version +check "cryptography" pip show cryptography | grep Version +check "setuptools" pip show setuptools | grep Version +check "tornado" pip show tornado | grep Version + +check "conda-update-conda" bash -c "conda update -y conda" +check "conda-install-tensorflow" bash -c "conda create --name test-env -c conda-forge --yes tensorflow" +check "conda-install-pytorch" bash -c "conda create --name test-env -c conda-forge --yes pytorch" + +# Report result +reportResults + + diff --git a/test/anaconda/install_anaconda_almalinux9.sh b/test/anaconda/install_anaconda_almalinux9.sh new file mode 100644 index 000000000..416d32a2b --- /dev/null +++ b/test/anaconda/install_anaconda_almalinux9.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +check "conda" conda --version +check "python" python --version +check "pylint" pylint --version +check "flake8" flake8 --version +check "autopep8" autopep8 --version +check "yapf" yapf --version +check "pydocstyle" pydocstyle --version +check "pycodestyle" pycodestyle --version +check "if conda-notice.txt exists" cat /usr/local/etc/vscode-dev-containers/conda-notice.txt + +check "certifi" pip show certifi | grep Version +check "cryptography" pip show cryptography | grep Version +check "setuptools" pip show setuptools | grep Version +check "tornado" pip show tornado | grep Version + +check "conda-update-conda" bash -c "conda update -y conda" +check "conda-install-tensorflow" bash -c "conda create --name test-env -c conda-forge --yes tensorflow" +check "conda-install-pytorch" bash -c "conda create --name test-env -c conda-forge --yes pytorch" + +# Report result +reportResults + + diff --git a/test/anaconda/install_anaconda_bookworm.sh b/test/anaconda/install_anaconda_bookworm.sh new file mode 100644 index 000000000..4a17f3dc1 --- /dev/null +++ b/test/anaconda/install_anaconda_bookworm.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +set -e + +# 
Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +check "conda" conda --version +check "python" python --version +check "pylint" pylint --version +check "flake8" flake8 --version +check "autopep8" autopep8 --version +check "yapf" yapf --version +check "pydocstyle" pydocstyle --version +check "pycodestyle" pycodestyle --version +check "if conda-notice.txt exists" cat /usr/local/etc/vscode-dev-containers/conda-notice.txt + +check "certifi" pip show certifi | grep Version +check "cryptography" pip show cryptography | grep Version +check "setuptools" pip show setuptools | grep Version +check "tornado" pip show tornado | grep Version + +check "conda-update-conda" bash -c "conda update -y conda" +check "conda-install-tensorflow" bash -c "conda create --name test-env -c conda-forge --yes tensorflow" +check "conda-install-pytorch" bash -c "conda create --name test-env -c conda-forge --yes pytorch" + +# Report result +reportResults + diff --git a/test/anaconda/install_anaconda_bullseye.sh b/test/anaconda/install_anaconda_bullseye.sh new file mode 100644 index 000000000..4a17f3dc1 --- /dev/null +++ b/test/anaconda/install_anaconda_bullseye.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +check "conda" conda --version +check "python" python --version +check "pylint" pylint --version +check "flake8" flake8 --version +check "autopep8" autopep8 --version +check "yapf" yapf --version +check "pydocstyle" pydocstyle --version +check "pycodestyle" pycodestyle --version +check "if conda-notice.txt exists" cat /usr/local/etc/vscode-dev-containers/conda-notice.txt + +check "certifi" pip show certifi | grep Version +check "cryptography" pip show cryptography | grep Version +check "setuptools" pip show setuptools | grep Version +check "tornado" pip show tornado | grep Version + +check "conda-update-conda" bash -c "conda update -y conda" 
+check "conda-install-tensorflow" bash -c "conda create --name test-env -c conda-forge --yes tensorflow" +check "conda-install-pytorch" bash -c "conda create --name test-env -c conda-forge --yes pytorch" + +# Report result +reportResults + diff --git a/test/anaconda/install_anaconda_fedora.sh b/test/anaconda/install_anaconda_fedora.sh new file mode 100644 index 000000000..416d32a2b --- /dev/null +++ b/test/anaconda/install_anaconda_fedora.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +check "conda" conda --version +check "python" python --version +check "pylint" pylint --version +check "flake8" flake8 --version +check "autopep8" autopep8 --version +check "yapf" yapf --version +check "pydocstyle" pydocstyle --version +check "pycodestyle" pycodestyle --version +check "if conda-notice.txt exists" cat /usr/local/etc/vscode-dev-containers/conda-notice.txt + +check "certifi" pip show certifi | grep Version +check "cryptography" pip show cryptography | grep Version +check "setuptools" pip show setuptools | grep Version +check "tornado" pip show tornado | grep Version + +check "conda-update-conda" bash -c "conda update -y conda" +check "conda-install-tensorflow" bash -c "conda create --name test-env -c conda-forge --yes tensorflow" +check "conda-install-pytorch" bash -c "conda create --name test-env -c conda-forge --yes pytorch" + +# Report result +reportResults + + diff --git a/test/anaconda/install_anaconda_jammy.sh b/test/anaconda/install_anaconda_jammy.sh new file mode 100644 index 000000000..4a17f3dc1 --- /dev/null +++ b/test/anaconda/install_anaconda_jammy.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +check "conda" conda --version +check "python" python --version +check "pylint" pylint --version +check "flake8" flake8 --version +check "autopep8" autopep8 --version 
+check "yapf" yapf --version +check "pydocstyle" pydocstyle --version +check "pycodestyle" pycodestyle --version +check "if conda-notice.txt exists" cat /usr/local/etc/vscode-dev-containers/conda-notice.txt + +check "certifi" pip show certifi | grep Version +check "cryptography" pip show cryptography | grep Version +check "setuptools" pip show setuptools | grep Version +check "tornado" pip show tornado | grep Version + +check "conda-update-conda" bash -c "conda update -y conda" +check "conda-install-tensorflow" bash -c "conda create --name test-env -c conda-forge --yes tensorflow" +check "conda-install-pytorch" bash -c "conda create --name test-env -c conda-forge --yes pytorch" + +# Report result +reportResults + diff --git a/test/anaconda/install_anaconda_noble.sh b/test/anaconda/install_anaconda_noble.sh new file mode 100644 index 000000000..4a17f3dc1 --- /dev/null +++ b/test/anaconda/install_anaconda_noble.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +check "conda" conda --version +check "python" python --version +check "pylint" pylint --version +check "flake8" flake8 --version +check "autopep8" autopep8 --version +check "yapf" yapf --version +check "pydocstyle" pydocstyle --version +check "pycodestyle" pycodestyle --version +check "if conda-notice.txt exists" cat /usr/local/etc/vscode-dev-containers/conda-notice.txt + +check "certifi" pip show certifi | grep Version +check "cryptography" pip show cryptography | grep Version +check "setuptools" pip show setuptools | grep Version +check "tornado" pip show tornado | grep Version + +check "conda-update-conda" bash -c "conda update -y conda" +check "conda-install-tensorflow" bash -c "conda create --name test-env -c conda-forge --yes tensorflow" +check "conda-install-pytorch" bash -c "conda create --name test-env -c conda-forge --yes pytorch" + +# Report result +reportResults + diff --git 
a/test/anaconda/install_anaconda_noble_without_user.sh b/test/anaconda/install_anaconda_noble_without_user.sh new file mode 100644 index 000000000..4a17f3dc1 --- /dev/null +++ b/test/anaconda/install_anaconda_noble_without_user.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +check "conda" conda --version +check "python" python --version +check "pylint" pylint --version +check "flake8" flake8 --version +check "autopep8" autopep8 --version +check "yapf" yapf --version +check "pydocstyle" pydocstyle --version +check "pycodestyle" pycodestyle --version +check "if conda-notice.txt exists" cat /usr/local/etc/vscode-dev-containers/conda-notice.txt + +check "certifi" pip show certifi | grep Version +check "cryptography" pip show cryptography | grep Version +check "setuptools" pip show setuptools | grep Version +check "tornado" pip show tornado | grep Version + +check "conda-update-conda" bash -c "conda update -y conda" +check "conda-install-tensorflow" bash -c "conda create --name test-env -c conda-forge --yes tensorflow" +check "conda-install-pytorch" bash -c "conda create --name test-env -c conda-forge --yes pytorch" + +# Report result +reportResults + diff --git a/test/anaconda/install_anaconda_rockylinux8.sh b/test/anaconda/install_anaconda_rockylinux8.sh new file mode 100644 index 000000000..416d32a2b --- /dev/null +++ b/test/anaconda/install_anaconda_rockylinux8.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +check "conda" conda --version +check "python" python --version +check "pylint" pylint --version +check "flake8" flake8 --version +check "autopep8" autopep8 --version +check "yapf" yapf --version +check "pydocstyle" pydocstyle --version +check "pycodestyle" pycodestyle --version +check "if conda-notice.txt exists" cat 
/usr/local/etc/vscode-dev-containers/conda-notice.txt + +check "certifi" pip show certifi | grep Version +check "cryptography" pip show cryptography | grep Version +check "setuptools" pip show setuptools | grep Version +check "tornado" pip show tornado | grep Version + +check "conda-update-conda" bash -c "conda update -y conda" +check "conda-install-tensorflow" bash -c "conda create --name test-env -c conda-forge --yes tensorflow" +check "conda-install-pytorch" bash -c "conda create --name test-env -c conda-forge --yes pytorch" + +# Report result +reportResults + + diff --git a/test/anaconda/install_anaconda_rockylinux9.sh b/test/anaconda/install_anaconda_rockylinux9.sh new file mode 100644 index 000000000..d71085c4c --- /dev/null +++ b/test/anaconda/install_anaconda_rockylinux9.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +check "conda" conda --version +check "python" python --version +check "pylint" pylint --version +check "flake8" flake8 --version +check "autopep8" autopep8 --version +check "yapf" yapf --version +check "pydocstyle" pydocstyle --version +check "pycodestyle" pycodestyle --version +check "if conda-notice.txt exists" cat /usr/local/etc/vscode-dev-containers/conda-notice.txt + +check "certifi" pip show certifi | grep Version +check "cryptography" pip show cryptography | grep Version +check "setuptools" pip show setuptools | grep Version +check "tornado" pip show tornado | grep Version + +check "conda-update-conda" bash -c "conda update -y conda" +check "conda-install-tensorflow" bash -c "conda create --name test-env -c conda-forge --yes tensorflow" +check "conda-install-pytorch" bash -c "conda create --name test-env -c conda-forge --yes pytorch" + +# Report result +reportResults + + + diff --git a/test/anaconda/scenarios.json b/test/anaconda/scenarios.json new file mode 100644 index 000000000..65fba8458 --- /dev/null +++ 
b/test/anaconda/scenarios.json @@ -0,0 +1,86 @@ +{ + "install_anaconda_noble": { + "image": "mcr.microsoft.com/devcontainers/base:noble", + "user": "vscode", + "features": { + "anaconda": { + "version": "latest" + } + } + }, + "install_anaconda_jammy": { + "image": "mcr.microsoft.com/devcontainers/base:jammy", + "user": "vscode", + "features": { + "anaconda": { + "version": "latest" + } + } + }, + "install_anaconda_bookworm": { + "image": "mcr.microsoft.com/devcontainers/base:bookworm", + "user": "vscode", + "features": { + "anaconda": { + "version": "latest" + } + } + }, + "install_anaconda_bullseye": { + "image": "mcr.microsoft.com/devcontainers/base:bullseye", + "user": "vscode", + "features": { + "anaconda": { + "version": "latest" + } + } + }, + "install_anaconda_noble_without_user": { + "image": "mcr.microsoft.com/devcontainers/base:noble", + "features": { + "anaconda": { + "version": "latest" + } + } + }, + "install_anaconda_almalinux8": { + "image": "almalinux:8", + "features": { + "anaconda": { + "version": "latest" + } + } + }, + "install_anaconda_almalinux9": { + "image": "almalinux:9", + "features": { + "anaconda": { + "version": "latest" + } + } + }, + "install_anaconda_rockylinux8": { + "image": "rockylinux:8", + "features": { + "anaconda": { + "version": "latest" + } + } + }, + "install_anaconda_rockylinux9": { + "image": "rockylinux:9", + "features": { + "anaconda": { + "version": "latest" + } + } + }, + "install_anaconda_fedora": { + "image": "fedora", + "features": { + "anaconda": { + "version": "latest" + } + } + } +} diff --git a/test/aws-cli/checkBashCompletion.sh b/test/aws-cli/checkBashCompletion.sh new file mode 100755 index 000000000..376461b52 --- /dev/null +++ b/test/aws-cli/checkBashCompletion.sh @@ -0,0 +1,27 @@ +#!/bin/bash + +command=$1 +expected=$2 + +echo -e "Checking completion for command '$command'..." + +# Send command as a character stream, followed by two tab characters, into an interactive bash shell. 
+# Also note the 'y' which responds to the possible Bash question "Display all xxx possibilities? (y or n)". +# Bash produces the autocompletion output on stderr, so redirect that to stdout. +# The sed bit captures the lines between Header and Footer (used as output delimiters). +# The first grep removes the "Display all" message (that is atomatically answered to "y" by the script). +# The last grep filters the output to lines containing the expected result. +COMPLETE_OUTPUT=$(echo if false\; then "Header"\; $command$'\t'$'\t'y\; "Footer" fi | bash -i 2>&1 | sed -n '/Header/{:a;n;/Footer/q;p;ba}' | grep -v ^'Display all ') +echo -e "\nCompletion output:\n" +echo -e "$COMPLETE_OUTPUT" +echo -e "\n" + +FILTERED_COMPLETE_OUTPUT=$(echo "$COMPLETE_OUTPUT" | grep "$expected") + +if [ -z "$FILTERED_COMPLETE_OUTPUT" ]; then + echo -e "Completion output does not contains '$expected'." + exit 1 +else + echo -e "Completion output contains '$expected'." + exit 0 +fi diff --git a/test/aws-cli/less_installed.sh b/test/aws-cli/less_installed.sh new file mode 100644 index 000000000..a700a7a18 --- /dev/null +++ b/test/aws-cli/less_installed.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -e + +# Import test library for `check` command +source dev-container-features-test-lib + +check "less is installed, pagination works !" 
less --version +check "less binary installation path" which less +check "Testing paginated output with less" ls -R / | less + +# Report result +reportResults \ No newline at end of file diff --git a/test/aws-cli/scenarios.json b/test/aws-cli/scenarios.json new file mode 100644 index 000000000..9dd703c27 --- /dev/null +++ b/test/aws-cli/scenarios.json @@ -0,0 +1,8 @@ +{ + "less_installed": { + "image": "mcr.microsoft.com/devcontainers/base:ubuntu", + "features": { + "aws-cli": {} + } + } +} \ No newline at end of file diff --git a/test/aws-cli/test.sh b/test/aws-cli/test.sh index d213eac38..45ef63db1 100755 --- a/test/aws-cli/test.sh +++ b/test/aws-cli/test.sh @@ -8,5 +8,15 @@ source dev-container-features-test-lib # Definition specific tests check "version" aws --version +# By default bash complete is disabled for the root user +# Enable it by replacing current ~/.bashrc with the /etc/skel/.bashrc file +mv ~/.bashrc ~/.bashrc.bak +cp /etc/skel/.bashrc ~/ + +check "aws-bash-completion-contains-version-option" ./checkBashCompletion.sh "aws --" "version" + +# Restore original ~/.bashrc +mv ~/.bashrc.bak ~/.bashrc + # Report result reportResults \ No newline at end of file diff --git a/test/azure-cli/install_bicep_trixie.sh b/test/azure-cli/install_bicep_trixie.sh new file mode 100644 index 000000000..28ff7e509 --- /dev/null +++ b/test/azure-cli/install_bicep_trixie.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +set -e + +# Import test library for `check` command +source dev-container-features-test-lib + +# Check to make sure the user is vscode +check "user is vscode" whoami | grep vscode + +check "version" az --version + +# Bicep-specific tests +check "bicep" bicep --version +check "az bicep" az bicep version + +# Report result +reportResults + diff --git a/test/azure-cli/install_extensions_trixie.sh b/test/azure-cli/install_extensions_trixie.sh new file mode 100644 index 000000000..fa55c2f5d --- /dev/null +++ b/test/azure-cli/install_extensions_trixie.sh @@ -0,0 +1,9 @@ 
+#!/bin/bash + +set -e + +# Import test library for `check` command +source dev-container-features-test-lib + +./install_extensions.sh + diff --git a/test/azure-cli/install_using_python_with_python_3_11_bullseye.sh b/test/azure-cli/install_using_python_with_python_3_11_bullseye.sh new file mode 100644 index 000000000..b9957843e --- /dev/null +++ b/test/azure-cli/install_using_python_with_python_3_11_bullseye.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +set -e + +# Import test library for `check` command +source dev-container-features-test-lib + +# Check to make sure the user is vscode +check "user is vscode" whoami | grep vscode +check "version" az --version + +echo -e "\n\n🔄 Testing 'O.S'" +if cat /etc/os-release | grep -q 'PRETTY_NAME="Debian GNU/Linux 11 (bullseye)"'; then + echo -e "\n\n✅ Passed 'O.S is Linux 11 (bullseye)'!" +else + echo -e "\n\n❌ Failed 'O.S is other than Linux 11 (bullseye)'!" +fi + + +# Report result +reportResults \ No newline at end of file diff --git a/test/azure-cli/install_with_python_3_12_bookworm.sh b/test/azure-cli/install_with_python_3_12_bookworm.sh new file mode 100644 index 000000000..2c8e1fd72 --- /dev/null +++ b/test/azure-cli/install_with_python_3_12_bookworm.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +set -e + +# Import test library for `check` command +source dev-container-features-test-lib + + +echo -e "\n🔄 Testing 'O.S'" +if cat /etc/os-release | grep -q 'PRETTY_NAME="Debian GNU/Linux 12 (bookworm)"'; then + echo -e "\n✅ Passed 'O.S is Linux 12 (bookworm)'!\n" +else + echo -e "\n❌ Failed 'O.S is other than Linux 12 (bookworm)'!\n" +fi + +# Check to make sure the user is vscode +check "user is vscode" whoami | grep vscode +check "version" az --version + +# Report result +reportResults \ No newline at end of file diff --git a/test/azure-cli/install_with_python_3_13_trixie.sh b/test/azure-cli/install_with_python_3_13_trixie.sh new file mode 100644 index 000000000..aac4f3c2d --- /dev/null +++ 
b/test/azure-cli/install_with_python_3_13_trixie.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -e + +# Import test library for `check` command +source dev-container-features-test-lib + + +echo -e "\n🔄 Testing 'O.S'" +if cat /etc/os-release | grep -q 'PRETTY_NAME="Debian GNU/Linux 13 (trixie)"'; then + echo -e "\n✅ Passed 'O.S is Linux 13 (trixie)'!\n" +else + echo -e "\n❌ Failed 'O.S is other than Linux 13 (trixie)'!\n" +fi + +# Check to make sure the user is vscode +check "user is vscode" whoami | grep vscode +check "version" az --version + +# Report result +reportResults + diff --git a/test/azure-cli/scenarios.json b/test/azure-cli/scenarios.json index b7a732395..c3a205e68 100644 --- a/test/azure-cli/scenarios.json +++ b/test/azure-cli/scenarios.json @@ -1,4 +1,14 @@ { + "install_extensions_trixie": { + "image": "mcr.microsoft.com/devcontainers/base:trixie", + "user": "vscode", + "features": { + "azure-cli": { + "version": "latest", + "extensions": "aks-preview,amg,containerapp" + } + } + }, "install_extensions": { "image": "mcr.microsoft.com/devcontainers/base:jammy", "user": "vscode", @@ -28,6 +38,16 @@ "installBicep": true } } + }, + "install_bicep_trixie": { + "image": "mcr.microsoft.com/devcontainers/base:trixie", + "user": "vscode", + "features": { + "azure-cli": { + "version": "latest", + "installBicep": true + } + } }, "install_with_python": { "image": "mcr.microsoft.com/devcontainers/base:jammy", @@ -38,5 +58,34 @@ "installUsingPython": true } } + }, + "install_with_python_3_13_trixie": { + "image": "mcr.microsoft.com/devcontainers/python:2-3.13-trixie", + "user": "vscode", + "features": { + "azure-cli": { + "version": "latest", + "installUsingPython": true + } + } + }, + "install_with_python_3_12_bookworm": { + "image": "mcr.microsoft.com/devcontainers/python:1-3.12-bookworm", + "user": "vscode", + "features": { + "azure-cli": { + "version": "latest" + } + } + }, + "install_using_python_with_python_3_11_bullseye": { + "image": 
"mcr.microsoft.com/devcontainers/python:1-3.11-bullseye", + "user": "vscode", + "features": { + "azure-cli": { + "version": "latest", + "installUsingPython": "true" + } + } } } \ No newline at end of file diff --git a/test/common-utils/Azure-linux-CU.sh b/test/common-utils/Azure-linux-CU.sh new file mode 100644 index 000000000..eeba25d10 --- /dev/null +++ b/test/common-utils/Azure-linux-CU.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Load Linux distribution info +. /etc/os-release + +# Check if the current user is root +check "root user" test "$(whoami)" = "root" + +# Check if the Linux distro is Azure Linux +check "azurelinux distro" test "$ID" = "azurelinux" + +# Definition specific tests +check "curl" curl --version +check "jq" jq --version + +# Report result +reportResults diff --git a/test/common-utils/alma-8-minimal.sh b/test/common-utils/alma-8-minimal.sh new file mode 100755 index 000000000..d08731824 --- /dev/null +++ b/test/common-utils/alma-8-minimal.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +. /etc/os-release +check "non-root user" test "$(whoami)" = "devcontainer" +check "distro" test "${PLATFORM_ID}" = "platform:el8" +check "curl" curl --version +check "jq" jq --version + +# Report result +reportResults \ No newline at end of file diff --git a/test/common-utils/alma-8.sh b/test/common-utils/alma-8.sh new file mode 100755 index 000000000..d08731824 --- /dev/null +++ b/test/common-utils/alma-8.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +. 
/etc/os-release +check "non-root user" test "$(whoami)" = "devcontainer" +check "distro" test "${PLATFORM_ID}" = "platform:el8" +check "curl" curl --version +check "jq" jq --version + +# Report result +reportResults \ No newline at end of file diff --git a/test/common-utils/alma-9-minimal.sh b/test/common-utils/alma-9-minimal.sh new file mode 100755 index 000000000..cb2b339e1 --- /dev/null +++ b/test/common-utils/alma-9-minimal.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +. /etc/os-release +check "non-root user" test "$(whoami)" = "devcontainer" +check "distro" test "${PLATFORM_ID}" = "platform:el9" +check "curl" curl --version +check "jq" jq --version + +# Report result +reportResults \ No newline at end of file diff --git a/test/common-utils/alma-9.sh b/test/common-utils/alma-9.sh new file mode 100755 index 000000000..cb2b339e1 --- /dev/null +++ b/test/common-utils/alma-9.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +. /etc/os-release +check "non-root user" test "$(whoami)" = "devcontainer" +check "distro" test "${PLATFORM_ID}" = "platform:el9" +check "curl" curl --version +check "jq" jq --version + +# Report result +reportResults \ No newline at end of file diff --git a/test/common-utils/alma-minimal-8.sh b/test/common-utils/alma-minimal-8.sh new file mode 100755 index 000000000..cb2b339e1 --- /dev/null +++ b/test/common-utils/alma-minimal-8.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +. 
/etc/os-release +check "non-root user" test "$(whoami)" = "devcontainer" +check "distro" test "${PLATFORM_ID}" = "platform:el9" +check "curl" curl --version +check "jq" jq --version + +# Report result +reportResults \ No newline at end of file diff --git a/test/common-utils/alma-minimal-9.sh b/test/common-utils/alma-minimal-9.sh new file mode 100755 index 000000000..cb2b339e1 --- /dev/null +++ b/test/common-utils/alma-minimal-9.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +. /etc/os-release +check "non-root user" test "$(whoami)" = "devcontainer" +check "distro" test "${PLATFORM_ID}" = "platform:el9" +check "curl" curl --version +check "jq" jq --version + +# Report result +reportResults \ No newline at end of file diff --git a/test/common-utils/alpine-3-18.sh b/test/common-utils/alpine-3-18.sh new file mode 100755 index 000000000..eaf384b9e --- /dev/null +++ b/test/common-utils/alpine-3-18.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +. /etc/os-release +check "non-root user" test "$(whoami)" = "devcontainer" +check "distro" test "${ID}" = "alpine" +check "bashrc" ls /etc/bash/bashrc +check "libssl1.1 is installed" grep "libssl1.1" <(apk list --no-cache libssl1.1) + +# Report result +reportResults \ No newline at end of file diff --git a/test/common-utils/bookworm.sh b/test/common-utils/bookworm.sh new file mode 100755 index 000000000..db8628713 --- /dev/null +++ b/test/common-utils/bookworm.sh @@ -0,0 +1,45 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +FAILED=() +echoStderr() +{ + echo "$@" 1>&2 +} + +checkOSPackages() { + LABEL=$1 + shift + echo -e "\n🧪 Testing $LABEL" + if dpkg-query --show -f='${Package}: ${Version}\n' "$@"; then + echo "✅ Passed!" 
+ return 0 + else + echoStderr "❌ $LABEL check failed." + FAILED+=("$LABEL") + return 1 + fi +} + +checkCommon() +{ + PACKAGE_LIST="manpages-posix \ + manpages-posix-dev" + + checkOSPackages "Installation of manpages-posix and manpages-posix-dev (non-free)" ${PACKAGE_LIST} +} + +# Check for manpages-posix, manpages-posix-dev non-free packages +checkCommon + +# Definition specific tests +. /etc/os-release +check "non-root user" test "$(whoami)" = "devcontainer" +check "distro" test "${VERSION_CODENAME}" = "bookworm" + +# Report result +reportResults \ No newline at end of file diff --git a/test/common-utils/configure_zsh_as_default_shell.sh b/test/common-utils/configure_zsh_as_default_shell.sh index 6a569a990..21b12101e 100644 --- a/test/common-utils/configure_zsh_as_default_shell.sh +++ b/test/common-utils/configure_zsh_as_default_shell.sh @@ -9,6 +9,9 @@ source dev-container-features-test-lib check "default-shell-is-zsh" bash -c "getent passwd $(whoami) | awk -F: '{ print $7 }' | grep '/bin/zsh'" # check it overrides the ~/.zshrc with default dev containers template check "default-zshrc-is-dev-container-template" bash -c "cat ~/.zshrc | grep ZSH_THEME | grep devcontainers" +check "zsh-path-contains-local-bin" zsh -l -c "echo $PATH | grep '/home/devcontainer/.local/bin'" + +check "Ensure .zprofile is owned by remoteUser" bash -c "stat -c '%U' /home/devcontainer/.zprofile | grep devcontainer" # Report result reportResults diff --git a/test/common-utils/devcontainer-custom-home.sh b/test/common-utils/devcontainer-custom-home.sh new file mode 100644 index 000000000..9b64cc0bf --- /dev/null +++ b/test/common-utils/devcontainer-custom-home.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +check "user is customUser" grep customUser <(whoami) +check "home is /customHome" grep "/customHome" <(getent passwd customUser | cut -d: -f6) + +# Report result +reportResults diff 
--git a/test/common-utils/devcontainer-custom-home/Dockerfile b/test/common-utils/devcontainer-custom-home/Dockerfile new file mode 100644 index 000000000..824e1e343 --- /dev/null +++ b/test/common-utils/devcontainer-custom-home/Dockerfile @@ -0,0 +1,4 @@ +FROM ubuntu:noble + +RUN groupadd customUser -g 30000 && \ + useradd customUser -u 30000 -g 30000 --create-home --home-dir /customHome diff --git a/test/common-utils/devcontainer-custom-user-default-home.sh b/test/common-utils/devcontainer-custom-user-default-home.sh new file mode 100644 index 000000000..f29bd7c74 --- /dev/null +++ b/test/common-utils/devcontainer-custom-user-default-home.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +check "user is customUser" grep customUser <(whoami) +check "home is /home/customUser" grep "/home/customUser" <(getent passwd customUser | cut -d: -f6) + +# Report result +reportResults diff --git a/test/common-utils/devcontainer-info/Dockerfile b/test/common-utils/devcontainer-info/Dockerfile index 28247d907..07d7a1caf 100644 --- a/test/common-utils/devcontainer-info/Dockerfile +++ b/test/common-utils/devcontainer-info/Dockerfile @@ -1,3 +1,9 @@ -FROM ubuntu:focal +FROM ubuntu:noble COPY meta.env /usr/local/etc/dev-containers/meta.env + +RUN if id "ubuntu" &>/dev/null; then \ + echo "Deleting user 'ubuntu' for noble" && userdel -f -r ubuntu || echo "Failed to delete ubuntu user for noble"; \ + else \ + echo "User 'ubuntu' does not exist for noble"; \ + fi diff --git a/test/common-utils/devcontainer-ruby-zshrc.sh b/test/common-utils/devcontainer-ruby-zshrc.sh new file mode 100644 index 000000000..9e7b26e4c --- /dev/null +++ b/test/common-utils/devcontainer-ruby-zshrc.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +check "configure-zshrc-without-overwrite" bash -c "grep 'rbenv init 
-' ~/.zshrc" + +# Report result +reportResults diff --git a/test/common-utils/devcontainer-ruby-zshrc/Dockerfile b/test/common-utils/devcontainer-ruby-zshrc/Dockerfile new file mode 100644 index 000000000..c5e7c69c4 --- /dev/null +++ b/test/common-utils/devcontainer-ruby-zshrc/Dockerfile @@ -0,0 +1,6 @@ +FROM mcr.microsoft.com/devcontainers/ruby:3.2 + +USER vscode +ENV USER=vscode +RUN echo 'eval "$(rbenv init -)"' >> /home/$USER/.zshrc + diff --git a/test/common-utils/no-terminal-title-by-default.sh b/test/common-utils/no-terminal-title-by-default.sh new file mode 100755 index 000000000..83fe7d221 --- /dev/null +++ b/test/common-utils/no-terminal-title-by-default.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +. /etc/os-release + +# Make sure bashrc is applied +source /root/.bashrc + +check "check_term_is_not_set" test !"$TERM" +check "check_prompt_command_not_set" test !"$PROMPT_COMMAND" + +# Report result +reportResults \ No newline at end of file diff --git a/test/common-utils/buster.sh b/test/common-utils/noble.sh old mode 100755 new mode 100644 similarity index 75% rename from test/common-utils/buster.sh rename to test/common-utils/noble.sh index 9fd2f409f..78ad146e3 --- a/test/common-utils/buster.sh +++ b/test/common-utils/noble.sh @@ -8,7 +8,8 @@ source dev-container-features-test-lib # Definition specific tests . 
/etc/os-release check "non-root user" test "$(whoami)" = "devcontainer" -check "distro" test "${VERSION_CODENAME}" = "buster" +check "distro" test "${VERSION_CODENAME}" = "noble" # Report result -reportResults \ No newline at end of file +reportResults + diff --git a/test/common-utils/rocky-8-minimal.sh b/test/common-utils/rocky-8-minimal.sh new file mode 100755 index 000000000..d08731824 --- /dev/null +++ b/test/common-utils/rocky-8-minimal.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +. /etc/os-release +check "non-root user" test "$(whoami)" = "devcontainer" +check "distro" test "${PLATFORM_ID}" = "platform:el8" +check "curl" curl --version +check "jq" jq --version + +# Report result +reportResults \ No newline at end of file diff --git a/test/common-utils/rocky-9-minimal.sh b/test/common-utils/rocky-9-minimal.sh new file mode 100755 index 000000000..cb2b339e1 --- /dev/null +++ b/test/common-utils/rocky-9-minimal.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +. 
/etc/os-release +check "non-root user" test "$(whoami)" = "devcontainer" +check "distro" test "${PLATFORM_ID}" = "platform:el9" +check "curl" curl --version +check "jq" jq --version + +# Report result +reportResults \ No newline at end of file diff --git a/test/common-utils/scenarios.json b/test/common-utils/scenarios.json index d7b296d41..ee138ca22 100644 --- a/test/common-utils/scenarios.json +++ b/test/common-utils/scenarios.json @@ -1,41 +1,64 @@ { - "bionic": { - "image": "ubuntu:bionic", + "jammy": { + "image": "ubuntu:jammy", "remoteUser": "devcontainer", "features": { "common-utils": {} } }, - "focal": { - "image": "ubuntu:focal", + "noble": { + "image": "ubuntu:noble", + "remoteUser": "devcontainer", + "features": { + "common-utils": {} + } + }, + "bullseye": { + "image": "debian:bullseye", "remoteUser": "devcontainer", "features": { "common-utils": {} } }, - "jammy": { - "image": "ubuntu:jammy", + "bookworm": { + "image": "debian:bookworm", + "remoteUser": "devcontainer", + "features": { + "common-utils": { + "nonFreePackages": "true" + } + } + }, + "centos-7": { + "image": "centos:7", "remoteUser": "devcontainer", "features": { "common-utils": {} } }, - "buster": { - "image": "debian:buster", + "alma-8": { + "image": "almalinux:8", "remoteUser": "devcontainer", "features": { "common-utils": {} } }, - "bullseye": { - "image": "debian:bullseye", + "alma-9": { + "image": "almalinux:9", "remoteUser": "devcontainer", "features": { "common-utils": {} } }, - "centos-7": { - "image": "centos:7", + "alma-8-minimal": { + "image": "almalinux:8-minimal", + "remoteUser": "devcontainer", + "features": { + "common-utils": {} + } + }, + "alma-9-minimal": { + "image": "almalinux:9-minimal", "remoteUser": "devcontainer", "features": { "common-utils": {} @@ -55,6 +78,20 @@ "common-utils": {} } }, + "rocky-8-minimal": { + "image": "rockylinux:8-minimal", + "remoteUser": "devcontainer", + "features": { + "common-utils": {} + } + }, + "rocky-9-minimal": { + "image": 
"rockylinux:9-minimal", + "remoteUser": "devcontainer", + "features": { + "common-utils": {} + } + }, "fedora": { "image": "fedora", "remoteUser": "devcontainer", @@ -108,12 +145,14 @@ } }, "configure_zsh_as_default_shell": { - "image": "mcr.microsoft.com/devcontainers/base:ubuntu", + "image": "ubuntu", "features": { "common-utils": { + "installZsh": true, "configureZshAsDefaultShell": true } - } + }, + "remoteUser": "devcontainer" }, "configure_zsh_no_template_second_step": { "image": "mcr.microsoft.com/devcontainers/base:ubuntu", @@ -160,6 +199,13 @@ "common-utils": {} } }, + "alpine-3-18": { + "image": "alpine:3.18", + "remoteUser": "devcontainer", + "features": { + "common-utils": {} + } + }, "devcontainer-info": { "build": { "dockerfile": "Dockerfile" @@ -175,6 +221,24 @@ } } }, + "devcontainer-ruby-zshrc": { + "build": { + "dockerfile": "Dockerfile" + }, + "remoteUser": "vscode", + "features": { + "common-utils": { + "installZsh": true, + "username": "vscode", + "userUid": "1000", + "userGid": "1000", + "upgradePackages": true, + "installOhMyZsh": true, + "installOhMyZshConfig": true, + "configureZshAsDefaultShell": true + } + } + }, "alpine-base-zsh-default": { "image": "mcr.microsoft.com/devcontainers/base:alpine", "remoteUser": "vscode", @@ -183,5 +247,42 @@ "configureZshAsDefaultShell": true } } + }, + "devcontainer-custom-home": { + "build": { + "dockerfile": "Dockerfile" + }, + "remoteUser": "customUser", + "features": { + "common-utils": {} + } + }, + "devcontainer-custom-user-default-home": { + "image": "mcr.microsoft.com/devcontainers/base:alpine", + "remoteUser": "customUser", + "features": { + "common-utils": {} + } + }, + "terminal-title-on-xterm": { + "image": "node", + "features": { + "common-utils": {} + }, + "containerEnv": { + "TERM": "xterm" + } + }, + "no-terminal-title-by-default": { + "image": "node", + "features": { + "common-utils": {} + } + }, + "Azure-linux-CU": { + "image": "mcr.microsoft.com/dotnet/sdk:8.0-azurelinux3.0", + 
"features": { + "common-utils": {} + } } -} +} \ No newline at end of file diff --git a/test/common-utils/terminal-title-on-xterm.sh b/test/common-utils/terminal-title-on-xterm.sh new file mode 100755 index 000000000..9b483651f --- /dev/null +++ b/test/common-utils/terminal-title-on-xterm.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +. /etc/os-release + +# Make sure bashrc is applied +source /root/.bashrc + +check "check_term_is_set" test "$TERM" = "xterm" +check "check_term_is_set" test "$PROMPT_COMMAND" = "precmd" + +# Report result +reportResults \ No newline at end of file diff --git a/test/conda/scenarios.json b/test/conda/scenarios.json index 9cc2fc136..ef930a17c 100644 --- a/test/conda/scenarios.json +++ b/test/conda/scenarios.json @@ -1,6 +1,6 @@ { "install_conda": { - "image": "ubuntu:focal", + "image": "ubuntu:noble", "features": { "conda": { "version": "4.12.0", diff --git a/test/desktop-lite/scenarios.json b/test/desktop-lite/scenarios.json new file mode 100644 index 000000000..4ee429dd5 --- /dev/null +++ b/test/desktop-lite/scenarios.json @@ -0,0 +1,49 @@ +{ + "test_xtigervnc_novnc_started": { + "image": "ubuntu:noble", + "features": { + "desktop-lite": {} + } + }, + "test_xtigervnc_novnc_started_noVNC_old_launch_script": { + "image": "ubuntu:noble", + "features": { + "desktop-lite": { + "noVncVersion": "1.2.0" + } + } + }, + "test_vnc_resolution_as_container_env_var": { + "image": "ubuntu:noble", + "features": { + "desktop-lite": {} + } + , + "containerEnv": { + "VNC_RESOLUTION": "1920x1080x32" + }, + "forwardPorts": [ + 5901, + 6080 + ] + }, + "test_vnc_resolution_as_remote_env_var": { + "image": "ubuntu:noble", + "features": { + "desktop-lite": {} + }, + "remoteEnv": { + "VNC_RESOLUTION": "1920x1080x32" + }, + "forwardPorts": [ + 5901, + 6080 + ] + }, + "test_xtigervnc_novnc_started_trixie": { + "image": "debian:trixie", + "features": { + 
"desktop-lite": {} + } + } +} \ No newline at end of file diff --git a/test/desktop-lite/test.sh b/test/desktop-lite/test.sh index 9009aa9c6..32eab53c4 100755 --- a/test/desktop-lite/test.sh +++ b/test/desktop-lite/test.sh @@ -5,9 +5,35 @@ set -e # Optional: Import test library source dev-container-features-test-lib +echoStderr() +{ + echo "$@" 1>&2 +} + +checkOSPackage() { + LABEL=$1 + PACKAGE_NAME=$2 + echo -e "\n🧪 Testing $LABEL" + # Check if the package exists and retrieve its exact version + if [ "$(dpkg-query -W -f='${Status}' "$PACKAGE_NAME" 2>/dev/null | grep -c "ok installed")" -eq 1 ]; then + echo "✅ Package '$PACKAGE_NAME' is installed." + exit 0 + else + echo "❌ Package '$PACKAGE_NAME' is not installed." + exit 1 + fi +} + check "desktop-init-exists" bash -c "ls /usr/local/share/desktop-init.sh" check "log-exists" bash -c "ls /tmp/container-init.log" check "fluxbox-exists" bash -c "ls -la ~/.fluxbox" +. /etc/os-release +if [ "${VERSION_CODENAME}" = "noble" ] || [ "${VERSION_CODENAME}" = "trixie" ]; then + checkOSPackage "if libasound2-dev exists !" "libasound2-dev" +else + checkOSPackage "if libasound2 exists !" 
"libasound2" +fi + # Report result reportResults \ No newline at end of file diff --git a/test/desktop-lite/test_vnc_resolution_as_container_env_var.sh b/test/desktop-lite/test_vnc_resolution_as_container_env_var.sh new file mode 100644 index 000000000..cc3dd3261 --- /dev/null +++ b/test/desktop-lite/test_vnc_resolution_as_container_env_var.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +GREEN='\033[0;32m'; NC='\033[0m'; RED='\033[0;31m'; YELLOW='\033[0;33m'; + +# Check if xtigervnc & noVnc processes are running after successful installation and initialization +check_process_running() { + port=$1 + # Get process id of process running on specific port + PID=$(lsof -i :$port | awk 'NR==2 {print $2}') + if [ -n "$PID" ]; then + CMD=$(ps -p $PID -o cmd --no-headers) + echo -e "${GREEN}Command running on port $port: ${YELLOW}$CMD${NC}" + else + echo -e "${RED}No process found listening on port $port.${NC}" + exit 1 + fi +} + +check_w_config_resolution() { + configResolution=$1 + actualResolution=$2 + + if echo "$1" | grep -q "$2"; then + echo -e "\n👍👍 ${GREEN}Configured resolution i.e. ${YELLOW}$configResolution${GREEN} has been set as vnc resolution i.e. ${YELLOW}$actualResolution${GREEN} in container.${NC}" + else + echo -e "\n❌❌ ${GREEN}Configured resolution i.e. ${YELLOW}$configResolution${GREEN} couldn't be set as vnc resolution i.e. 
${YELLOW}$actualResolution${GREEN} in container.${NC}" + fi +} + +check "Whether xtigervnc is Running" check_process_running 5901 +resolution=$(ps -x -ww | grep Xtigervnc | awk "{for(i=1;i<=NF;i++) if (\$i ~ /-geometry/) {print \$(i+1); exit}}") +check "xtigervnc resolution" bash -c '$resolution' +check_w_config_resolution $VNC_RESOLUTION $resolution +sleep 2 +check "Whether no_vnc is Running" check_process_running 6080 + +check "desktop-init-exists" bash -c "ls /usr/local/share/desktop-init.sh" +check "log-exists" bash -c "ls /tmp/container-init.log" +check "log file contents" bash -c "cat /tmp/container-init.log" + +# Report result +reportResults \ No newline at end of file diff --git a/test/desktop-lite/test_vnc_resolution_as_remote_env_var.sh b/test/desktop-lite/test_vnc_resolution_as_remote_env_var.sh new file mode 100644 index 000000000..18409505c --- /dev/null +++ b/test/desktop-lite/test_vnc_resolution_as_remote_env_var.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Check if xtigervnc & noVnc processes are running after successful installation and initialization +check_process_running() { + port=$1 + # Get process id of process running on specific port + PID=$(lsof -i :$port | awk 'NR==2 {print $2}') + GREEN='\033[0;32m'; NC='\033[0m'; RED='\033[0;31m'; YELLOW='\033[0;33m'; + if [ -n "$PID" ]; then + CMD=$(ps -p $PID -o cmd --no-headers) + echo -e "${GREEN}Command running on port $port: ${YELLOW}$CMD${NC}" + else + echo -e "${RED}No process found listening on port $port.${NC}" + exit 1 + fi +} + +check "Whether xtigervnc is Running" check_process_running 5901 +check "xtigervnc resolution" bash -c 'ps -x -ww | grep Xtigervnc | awk "{for(i=1;i<=NF;i++) if (\$i ~ /-geometry/) {print \$(i+1); exit}}"' +sleep 2 +check "Whether no_vnc is Running" check_process_running 6080 + +check "desktop-init-exists" bash -c "ls /usr/local/share/desktop-init.sh" +check "log-exists" bash -c "ls 
/tmp/container-init.log" +check "log file contents" bash -c "cat /tmp/container-init.log" + +# Report result +reportResults \ No newline at end of file diff --git a/test/desktop-lite/test_xtigervnc_novnc_started.sh b/test/desktop-lite/test_xtigervnc_novnc_started.sh new file mode 100644 index 000000000..671032229 --- /dev/null +++ b/test/desktop-lite/test_xtigervnc_novnc_started.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Check if xtigervnc & noVnc processes are running after successful installation and initialization +check_process_running() { + port=$1 + # Get process id of process running on specific port + PID=$(lsof -i :$port | awk 'NR==2 {print $2}') + if [ -n "$PID" ]; then + CMD=$(ps -p $PID -o cmd --no-headers) + GREEN='\033[0;32m'; NC='\033[0m'; RED='\033[0;31m'; YELLOW='\033[0;33m'; + echo -e "${GREEN}Command running on port $port: ${YELLOW}$CMD${NC}" + else + echo -e "${RED}No process found listening on port $port.${NC}" + fi +} + +check "Whether xtigervnc is Running" check_process_running 5901 +sleep 1 +check "Whether no_vnc is Running" check_process_running 6080 + +check "desktop-init-exists" bash -c "ls /usr/local/share/desktop-init.sh" +check "log-exists" bash -c "ls /tmp/container-init.log" +check "log file contents" bash -c "cat /tmp/container-init.log" + +# Report result +reportResults \ No newline at end of file diff --git a/test/desktop-lite/test_xtigervnc_novnc_started_noVNC_old_launch_script.sh b/test/desktop-lite/test_xtigervnc_novnc_started_noVNC_old_launch_script.sh new file mode 100644 index 000000000..5e0898c7d --- /dev/null +++ b/test/desktop-lite/test_xtigervnc_novnc_started_noVNC_old_launch_script.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Check if xtigervnc & noVnc processes are running after successful installation and initialization +check_process_running() { + port=$1 + # 
Get process id of process running on specific port + PID=$(lsof -i :$port | awk 'NR==2 {print $2}') + if [ -n "$PID" ]; then + CMD=$(ps -p $PID -o cmd --no-headers) + GREEN='\033[0;32m'; NC='\033[0m'; RED='\033[0;31m'; YELLOW='\033[0;33m'; + echo -e "${GREEN}Command running on port $port: ${YELLOW}$CMD${NC}" + else + echo -e "${RED}No process found listening on port $port.${NC}" + fi +} + +check "Whether xtigervnc is Running" check_process_running 5901 +sleep 1 +check "Whether no_vnc is Running" check_process_running 6080 + +check "desktop-init-exists" bash -c "ls /usr/local/share/desktop-init.sh" +check "log-exists" bash -c "ls /tmp/container-init.log" +check "log file contents" bash -c "cat /tmp/container-init.log" + +# Report result +reportResults + + diff --git a/test/desktop-lite/test_xtigervnc_novnc_started_trixie.sh b/test/desktop-lite/test_xtigervnc_novnc_started_trixie.sh new file mode 100644 index 000000000..671032229 --- /dev/null +++ b/test/desktop-lite/test_xtigervnc_novnc_started_trixie.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Check if xtigervnc & noVnc processes are running after successful installation and initialization +check_process_running() { + port=$1 + # Get process id of process running on specific port + PID=$(lsof -i :$port | awk 'NR==2 {print $2}') + if [ -n "$PID" ]; then + CMD=$(ps -p $PID -o cmd --no-headers) + GREEN='\033[0;32m'; NC='\033[0m'; RED='\033[0;31m'; YELLOW='\033[0;33m'; + echo -e "${GREEN}Command running on port $port: ${YELLOW}$CMD${NC}" + else + echo -e "${RED}No process found listening on port $port.${NC}" + fi +} + +check "Whether xtigervnc is Running" check_process_running 5901 +sleep 1 +check "Whether no_vnc is Running" check_process_running 6080 + +check "desktop-init-exists" bash -c "ls /usr/local/share/desktop-init.sh" +check "log-exists" bash -c "ls /tmp/container-init.log" +check "log file contents" bash -c "cat 
/tmp/container-init.log" + +# Report result +reportResults \ No newline at end of file diff --git a/test/docker-in-docker/dockerIp6tablesDisabledTest.sh b/test/docker-in-docker/dockerIp6tablesDisabledTest.sh new file mode 100644 index 000000000..977054ffc --- /dev/null +++ b/test/docker-in-docker/dockerIp6tablesDisabledTest.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +ip6tablesCheck() { + if command -v ip6tables > /dev/null 2>&1; then + if ip6tables -L > /dev/null 2>&1; then + echo "✔️ ip6tables is enabled." + else + echo "❌ ip6tables is disabled." + fi + else + echo "❕ip6tables command not found. ❕" + fi +} + +check "ip6tables" ip6tablesCheck +check "ip6tables check" bash -c "docker network inspect bridge" +check "docker-build" docker build ./ + +reportResults \ No newline at end of file diff --git a/test/docker-in-docker/docker_build.sh b/test/docker-in-docker/docker_build.sh index d51e7d644..322a819fc 100755 --- a/test/docker-in-docker/docker_build.sh +++ b/test/docker-in-docker/docker_build.sh @@ -9,8 +9,12 @@ source dev-container-features-test-lib check "docker-buildx" docker buildx version check "docker-build" docker build ./ -check "installs docker-compose v1 install" bash -c "type docker-compose" check "installs compose-switch" bash -c "[[ -f /usr/local/bin/compose-switch ]]" +check "docker compose" bash -c "docker compose version | grep -E '2.[0-9]+.[0-9]+'" +check "docker-compose" bash -c "docker-compose --version | grep -E '2.[0-9]+.[0-9]+'" + +check "docker-buildx" bash -c "docker buildx version" +check "docker-buildx-path" bash -c "ls -la /usr/libexec/docker/cli-plugins/docker-buildx" # Report result reportResults diff --git a/test/docker-in-docker/docker_build_2.sh b/test/docker-in-docker/docker_build_2.sh index 742b222fb..d60cd937a 100644 --- a/test/docker-in-docker/docker_build_2.sh +++ b/test/docker-in-docker/docker_build_2.sh @@ -8,6 +8,8 @@ source 
dev-container-features-test-lib # Definition specific tests check "docker-buildx" docker buildx version check "docker-build" docker build ./ +check "docker-buildx" bash -c "docker buildx version" +check "docker-buildx-path" bash -c "ls -la /usr/libexec/docker/cli-plugins/docker-buildx" # Report result reportResults diff --git a/test/docker-in-docker/docker_build_fallback_buildx.sh b/test/docker-in-docker/docker_build_fallback_buildx.sh new file mode 100644 index 000000000..e139613a0 --- /dev/null +++ b/test/docker-in-docker/docker_build_fallback_buildx.sh @@ -0,0 +1,190 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests before test for fallback +HL="\033[1;33m" +N="\033[0;37m" +echo -e "\n👉${HL} docker/buildx version as installed by docker-in-docker feature${N}" +check "docker-buildx" docker buildx version +check "docker-build" docker build ./ +check "docker-buildx" bash -c "docker buildx version" +check "docker-buildx-path" bash -c "ls -la /usr/libexec/docker/cli-plugins/docker-buildx" + +# Code to test the made up scenario when latest version of docker/buildx fails on wget command for fetching the artifacts +architecture="$(dpkg --print-architecture)" +case "${architecture}" in + amd64) target_compose_arch=x86_64 ;; + arm64) target_compose_arch=aarch64 ;; + *) + echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine." + exit 1 +esac + +docker_home="/usr/libexec/docker" +cli_plugins_dir="${docker_home}/cli-plugins" + +# Figure out correct version of a three part version number is not passed +find_version_from_git_tags() { + local variable_name=$1 + local requested_version=${!variable_name} + if [ "${requested_version}" = "none" ]; then return; fi + local repository=$2 + local prefix=${3:-"tags/v"} + local separator=${4:-"."} + local last_part_optional=${5:-"false"} + if [ "$(echo "${requested_version}" | grep -o "." 
| wc -l)" != "2" ]; then + local escaped_separator=${separator//./\\.} + local last_part + if [ "${last_part_optional}" = "true" ]; then + last_part="(${escaped_separator}[0-9]+)?" + else + last_part="${escaped_separator}[0-9]+" + fi + local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$" + local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)" + if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then + declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)" + else + set +e + declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")" + set -e + fi + fi + if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then + err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2 + exit 1 + fi + echo "${variable_name}=${!variable_name}" +} + +# Use semver logic to decrement a version number then look for the closest match +find_prev_version_from_git_tags() { + local variable_name=$1 + local current_version=${!variable_name} + local repository=$2 + # Normally a "v" is used before the version number, but support alternate cases + local prefix=${3:-"tags/v"} + # Some repositories use "_" instead of "." for version number part separation, support that + local separator=${4:-"."} + # Some tools release versions that omit the last digit (e.g. go) + local last_part_optional=${5:-"false"} + # Some repositories may have tags that include a suffix (e.g. actions/node-versions) + local version_suffix_regex=$6 + # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios. 
+ set +e + major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')" + minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')" + breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')" + + if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then + ((major=major-1)) + declare -g ${variable_name}="${major}" + # Look for latest version from previous major release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + # Handle situations like Go's odd version pattern where "0" releases omit the last part + elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then + ((minor=minor-1)) + declare -g ${variable_name}="${major}.${minor}" + # Look for latest version from previous minor release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + else + ((breakfix=breakfix-1)) + if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then + declare -g ${variable_name}="${major}.${minor}" + else + declare -g ${variable_name}="${major}.${minor}.${breakfix}" + fi + fi + set -e +} + +# Function to fetch the version released prior to the latest version +get_previous_version() { + local url=$1 + local repo_url=$2 + local variable_name=$3 + local mode=$4 + prev_version=${!variable_name} + + echo -e "\nAttempting to find latest version using Github Api." + + output=$(curl -s "$repo_url"); + message=$(echo "$output" | jq -r '.message') + + if [[ $mode != "install_from_github_api_valid" ]]; then + message="API rate limit exceeded" + fi + + if [[ $message == "API rate limit exceeded"* ]]; then + echo -e "\nAttempting to find latest version using Github Api Failed. Exceeded API Rate Limit." + echo -e "\nAttempting to find latest version using Github Tags." 
+ find_prev_version_from_git_tags prev_version "$url" "tags/v" + declare -g ${variable_name}="${prev_version}" + else + echo -e "\nAttempting to find latest version using Github Api Succeeded." + version=$(echo "$output" | jq -r '.tag_name') + declare -g ${variable_name}="${version#v}" + fi + echo "${variable_name}=${!variable_name}" +} + +get_github_api_repo_url() { + local url=$1 + echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases/latest" +} + +install_using_get_previous_version() { + local url=$1 + local mode=$2 + local repo_url=$(get_github_api_repo_url "$url") + echo -e "\n(!) Failed to fetch the latest artifacts for docker buildx v${buildx_version}..." + get_previous_version "${url}" "${repo_url}" buildx_version "${mode}" + buildx_file_name="buildx-v${buildx_version}.linux-${architecture}" + echo -e "\nAttempting to install v${buildx_version}" + wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} +} + +install_docker_buildx() { + mode=$1 + echo -e "\n${HL} Creating a scenario for fallback${N}\n" + + buildx_version="0.13.xyz" + echo "(*) Installing buildx ${buildx_version}..." 
+ buildx_file_name="buildx-v${buildx_version}.linux-${architecture}" + cd /tmp + + docker_buildx_url="https://github.com/docker/buildx" + wget https://github.com/docker/buildx/releases/download/v${buildx_version}/${buildx_file_name} || install_using_get_previous_version "${docker_buildx_url}" "${mode}" + + docker_home="/usr/libexec/docker" + cli_plugins_dir="${docker_home}/cli-plugins" + + mkdir -p ${cli_plugins_dir} + mv ${buildx_file_name} ${cli_plugins_dir}/docker-buildx + chmod +x ${cli_plugins_dir}/docker-buildx + + chown -R "${USERNAME}:docker" "${docker_home}" + chmod -R g+r+w "${docker_home}" + find "${docker_home}" -type d -print0 | xargs -n 1 -0 chmod g+s +} + +echo -e "\n👉${HL} docker-buildx version as installed by docker-in-docker test ( installing by github api ) ${N}" +install_docker_buildx "install_from_github_api_valid" + +# Definition specific tests after test for fallback +check "docker-buildx" docker buildx version +check "docker-buildx" bash -c "docker buildx version" + +echo -e "\n👉${HL} docker-buildx version as installed by docker-in-docker test ( installing by find_prev_version_from_git_tags ) ${N}" +install_docker_buildx + +# Definition specific tests after test for fallback +check "docker-buildx" docker buildx version +check "docker-buildx" bash -c "docker buildx version" + +# Report result +reportResults diff --git a/test/docker-in-docker/docker_build_fallback_compose.sh b/test/docker-in-docker/docker_build_fallback_compose.sh new file mode 100644 index 000000000..ed1dab3f4 --- /dev/null +++ b/test/docker-in-docker/docker_build_fallback_compose.sh @@ -0,0 +1,171 @@ +#!/bin/bash + +# Optional: Import test library +source dev-container-features-test-lib + +# Setup STDERR. +err() { + echo "(!) 
$*" >&2 +} + +HL="\033[1;33m" +N="\033[0;37m" +echo -e "\n👉${HL} docker-compose version as installed by docker-in-docker feature${N}" +check "docker-compose" bash -c "docker-compose version" + +architecture="$(dpkg --print-architecture)" +case "${architecture}" in + amd64) target_compose_arch=x86_64 ;; + arm64) target_compose_arch=aarch64 ;; + *) + echo "(!) Docker in docker does not support machine architecture '$architecture'. Please use an x86-64 or ARM64 machine." + exit 1 +esac + +docker_compose_path="/usr/local/bin/docker-compose" +cli_plugins_dir="${docker_home}/cli-plugins" + +# Figure out correct version of a three part version number is not passed +find_version_from_git_tags() { + local variable_name=$1 + local requested_version=${!variable_name} + if [ "${requested_version}" = "none" ]; then return; fi + local repository=$2 + local prefix=${3:-"tags/v"} + local separator=${4:-"."} + local last_part_optional=${5:-"false"} + if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then + local escaped_separator=${separator//./\\.} + local last_part + if [ "${last_part_optional}" = "true" ]; then + last_part="(${escaped_separator}[0-9]+)?" + else + last_part="${escaped_separator}[0-9]+" + fi + local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$" + local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)" + if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then + declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)" + else + set +e + declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")" + set -e + fi + fi + if [ -z "${!variable_name}" ] || ! 
echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then + err "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2 + exit 1 + fi + echo "${variable_name}=${!variable_name}" +} + +# Use semver logic to decrement a version number then look for the closest match +find_prev_version_from_git_tags() { + local variable_name=$1 + local current_version=${!variable_name} + local repository=$2 + # Normally a "v" is used before the version number, but support alternate cases + local prefix=${3:-"tags/v"} + # Some repositories use "_" instead of "." for version number part separation, support that + local separator=${4:-"."} + # Some tools release versions that omit the last digit (e.g. go) + local last_part_optional=${5:-"false"} + # Some repositories may have tags that include a suffix (e.g. actions/node-versions) + local version_suffix_regex=$6 + # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios. 
+ set +e + major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')" + minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')" + breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')" + + if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then + ((major=major-1)) + declare -g ${variable_name}="${major}" + # Look for latest version from previous major release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + # Handle situations like Go's odd version pattern where "0" releases omit the last part + elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then + ((minor=minor-1)) + declare -g ${variable_name}="${major}.${minor}" + # Look for latest version from previous minor release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + else + ((breakfix=breakfix-1)) + if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then + declare -g ${variable_name}="${major}.${minor}" + else + declare -g ${variable_name}="${major}.${minor}.${breakfix}" + fi + fi + set -e +} + +# Function to fetch the version released prior to the latest version +get_previous_version() { + local url=$1 + local repo_url=$2 + local variable_name=$3 + local mode=$4 + prev_version=${!variable_name} + + output=$(curl -s "$repo_url"); + + if echo "$output" | jq -e 'type == "object"' > /dev/null; then + message=$(echo "$output" | jq -r '.message') + + if [[ $message == "API rate limit exceeded"* ]]; then + echo -e "\nAn attempt to find latest version using GitHub Api Failed... \nReason: ${message}" + echo -e "\nAttempting to find latest version using GitHub tags." 
+ find_prev_version_from_git_tags prev_version "$url" "tags/v" + declare -g ${variable_name}="${prev_version}" + fi + elif echo "$output" | jq -e 'type == "array"' > /dev/null; then + echo -e "\nAttempting to find latest version using GitHub Api." + version=$(echo "$output" | jq -r '.[1].tag_name') + declare -g ${variable_name}="${version#v}" + fi + echo "${variable_name}=${!variable_name}" +} + +get_github_api_repo_url() { + local url=$1 + echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases" +} + +install_using_get_previous_version() { + local url=$1 + local mode=$2 + local repo_url=$(get_github_api_repo_url "$url") + echo -e "\n(!) Failed to fetch the latest artifacts for docker-compose v${compose_version}..." + get_previous_version "$url" "$repo_url" compose_version "$mode" + echo -e "\nAttempting to install v${compose_version}" + curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} +} + +install_docker_compose() { + mode=$1 + compose_version="2.25.xyz" + docker_compose_url="https://github.com/docker/compose" + echo "(*) Installing docker-compose ${compose_version}..." 
+ curl -fsSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}" -o ${docker_compose_path} || install_using_get_previous_version "$docker_compose_url" "$mode" +} + +chmod +x ${docker_compose_path} + +# Download the SHA256 checksum +DOCKER_COMPOSE_SHA256="$(curl -sSL "https://github.com/docker/compose/releases/download/v${compose_version}/docker-compose-linux-${target_compose_arch}.sha256" | awk '{print $1}')" +echo "${DOCKER_COMPOSE_SHA256} ${docker_compose_path}" > docker-compose.sha256sum +sha256sum -c docker-compose.sha256sum --ignore-missing + +mkdir -p ${cli_plugins_dir} +cp ${docker_compose_path} ${cli_plugins_dir} + +echo -e "\n👉${HL} docker-compose version as installed by docker-in-docker test ( installing by github api ) ${N}" +install_docker_compose "install_from_github_api_valid" + +check "docker-compose" bash -c "docker-compose version" + +echo -e "\n👉${HL} docker-compose version as installed by docker-in-docker test ( installing by find_prev_version_from_git_tags ) ${N}" +install_docker_compose + +check "docker-compose" bash -c "docker-compose version" diff --git a/test/docker-in-docker/docker_build_older.sh b/test/docker-in-docker/docker_build_older.sh index 742b222fb..d60cd937a 100644 --- a/test/docker-in-docker/docker_build_older.sh +++ b/test/docker-in-docker/docker_build_older.sh @@ -8,6 +8,8 @@ source dev-container-features-test-lib # Definition specific tests check "docker-buildx" docker buildx version check "docker-build" docker build ./ +check "docker-buildx" bash -c "docker buildx version" +check "docker-buildx-path" bash -c "ls -la /usr/libexec/docker/cli-plugins/docker-buildx" # Report result reportResults diff --git a/test/docker-in-docker/docker_buildx.sh b/test/docker-in-docker/docker_buildx.sh new file mode 100755 index 000000000..c571596fc --- /dev/null +++ b/test/docker-in-docker/docker_buildx.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +set -e + +# Optional: Import test library 
+source dev-container-features-test-lib + +# Definition specific tests +check "docker-buildx" bash -c "docker buildx version" +check "docker-buildx-path" bash -c "ls -la /usr/libexec/docker/cli-plugins/docker-buildx" + +check "docker-buildx" docker buildx version +check "docker-build" docker build ./ + +check "installs docker-compose v1 install" bash -c "type docker-compose" +check "installs compose-switch" bash -c "[[ -f /usr/local/bin/compose-switch ]]" + +# Report result +reportResults diff --git a/test/docker-in-docker/docker_compose_v1.sh b/test/docker-in-docker/docker_compose_v1.sh new file mode 100755 index 000000000..3f7453c83 --- /dev/null +++ b/test/docker-in-docker/docker_compose_v1.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests + +check "docker-compose" bash -c "docker-compose version | grep -E '1.[0-9]+.[0-9]+'" + +# Report result +reportResults diff --git a/test/docker-in-docker/docker_compose_v2.sh b/test/docker-in-docker/docker_compose_v2.sh new file mode 100755 index 000000000..5a512d2c5 --- /dev/null +++ b/test/docker-in-docker/docker_compose_v2.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests + +check "docker compose" bash -c "docker compose version | grep -E '2.[0-9]+.[0-9]+'" +check "docker-compose" bash -c "docker-compose --version | grep -E '2.[0-9]+.[0-9]+'" +check "installs compose-switch as docker-compose" bash -c "[[ -f /usr/local/bin/docker-compose ]]" +check "installs compose-switch" bash -c "[[ -f /usr/local/bin/compose-switch ]]" + +# Report result +reportResults diff --git a/test/docker-in-docker/docker_python_bookworm.sh b/test/docker-in-docker/docker_python_bookworm.sh new file mode 100755 index 000000000..b7080dee1 --- /dev/null +++ b/test/docker-in-docker/docker_python_bookworm.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -e + +# 
Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +check "docker-buildx" bash -c "docker buildx version" +check "docker-buildx-path" bash -c "ls -la /usr/libexec/docker/cli-plugins/docker-buildx" + +check "docker-buildx" docker buildx version +check "docker-build" docker build ./ + +check "installs docker-compose v2 install" bash -c "type docker-compose" +check "docker compose" bash -c "docker compose version | grep -E '2.[0-9]+.[0-9]+'" +check "docker-compose" bash -c "docker-compose --version | grep -E '2.[0-9]+.[0-9]+'" + +check "installs compose-switch as docker-compose" bash -c "[[ -f /usr/local/bin/docker-compose ]]" + +# Report result +reportResults diff --git a/test/docker-in-docker/docker_specific_moby_buildx.sh b/test/docker-in-docker/docker_specific_moby_buildx.sh new file mode 100755 index 000000000..b2d4610a1 --- /dev/null +++ b/test/docker-in-docker/docker_specific_moby_buildx.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib +# Definition specific tests +check "moby-buildx" bash -c "dpkg-query -W moby-buildx | grep -E '0.14.0'" + +check "docker-buildx" bash -c "docker buildx version" +check "docker-buildx-path" bash -c "ls -la /usr/libexec/docker/cli-plugins/docker-buildx" + +check "docker-buildx" docker buildx version +check "docker-build" docker build ./ + +check "installs docker-compose v1 install" bash -c "type docker-compose" +check "installs compose-switch" bash -c "[[ -f /usr/local/bin/compose-switch ]]" + +# Report result +reportResults diff --git a/test/docker-in-docker/install_moby_on_debian_trixie.sh b/test/docker-in-docker/install_moby_on_debian_trixie.sh new file mode 120000 index 000000000..6c5c2a157 --- /dev/null +++ b/test/docker-in-docker/install_moby_on_debian_trixie.sh @@ -0,0 +1 @@ +install_on_debian_trixie.sh \ No newline at end of file diff --git a/test/docker-in-docker/install_on_debian_trixie.sh 
b/test/docker-in-docker/install_on_debian_trixie.sh new file mode 100644 index 000000000..c6c679684 --- /dev/null +++ b/test/docker-in-docker/install_on_debian_trixie.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +set -e + +# Import test library +source dev-container-features-test-lib + +# Definition specific tests +check "docker installed" bash -c "type docker" + +# Report results +reportResults diff --git a/test/docker-in-docker/pin_docker-ce_version_moby_false.sh b/test/docker-in-docker/pin_docker-ce_version_moby_false.sh new file mode 100644 index 000000000..ec33d1504 --- /dev/null +++ b/test/docker-in-docker/pin_docker-ce_version_moby_false.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +# Optional: Import test library +source dev-container-features-test-lib + +check "docker-ce" bash -c "docker --version" +check "docker-ce-cli" bash -c "docker version" + +#report result +reportResults \ No newline at end of file diff --git a/test/docker-in-docker/scenarios.json b/test/docker-in-docker/scenarios.json index 1274877b9..3b5dce013 100644 --- a/test/docker-in-docker/scenarios.json +++ b/test/docker-in-docker/scenarios.json @@ -1,4 +1,22 @@ { + "docker_build_fallback_compose": { + "image": "ubuntu:noble", + "features": { + "docker-in-docker": { + "version": "latest", + "dockerDashComposeVersion": "latest" + } + } + }, + "dockerIp6tablesDisabledTest": { + "image": "ubuntu:noble", + "features": { + "docker-in-docker": { + "version": "27.0.3", + "disableIp6tables": true + } + } + }, "dockerDefaultAddressPool": { "image": "mcr.microsoft.com/vscode/devcontainers/javascript-node:0-18", "remoteUser": "node", @@ -38,7 +56,7 @@ "remoteUser": "node" }, "docker_build_2": { - "image": "ubuntu:focal", + "image": "ubuntu:noble", "features": { "docker-in-docker": { "version": "latest", @@ -68,11 +86,94 @@ }, "remoteUser": "node" }, + "docker_buildx": { + "image": "mcr.microsoft.com/devcontainers/base:bookworm", + "features": { + "docker-in-docker": { + "moby": false, + "installDockerBuildx": true + } + } 
+ }, + "docker_python_bookworm": { + "image": "mcr.microsoft.com/devcontainers/base:bookworm", + "features": { + "docker-in-docker": { + "moby": true, + "installDockerBuildx": true, + "dockerDashComposeVersion": "v2" + } + } + }, + "docker_compose_v1": { + "image": "mcr.microsoft.com/devcontainers/base:noble", + "features": { + "docker-in-docker": { + "moby": true, + "installDockerBuildx": true, + "dockerDashComposeVersion": "v1" + } + } + }, + "docker_compose_v2": { + "image": "mcr.microsoft.com/devcontainers/base:noble", + "features": { + "docker-in-docker": { + "moby": true, + "installDockerBuildx": true, + "dockerDashComposeVersion": "v2" + } + } + }, + "docker_build_fallback_buildx": { + "image": "ubuntu:noble", + "features": { + "docker-in-docker": { + "version": "latest", + "installDockerBuildx": true + } + } + }, + "install_on_debian_trixie": { + "image": "debian:trixie", + "features": { + "docker-in-docker": { + "moby": false + } + } + }, + "docker_specific_moby_buildx": { + "image": "ubuntu:noble", + "features": { + "docker-in-docker": { + "mobyBuildxVersion": "0.14.0" + } + } + }, + "pin_docker-ce_version_moby_false": { + "image": "mcr.microsoft.com/devcontainers/base:debian", + "features": { + "docker-in-docker": { + "version": "28.4.0", + "moby": "false", + "mobyBuildxVersion": "latest", + "dockerDashComposeVersion": "none", + "azureDnsAutoDetection": "true", + "dockerDefaultAddressPool": "", + "installDockerBuildx": "true", + "installDockerComposeSwitch": "true", + "disableIp6tables": "false" + } + } + }, // DO NOT REMOVE: This scenario is used by the docker-in-docker-stress-test workflow "docker_with_on_create_command": { "image": "mcr.microsoft.com/devcontainers/base:debian", "features": { - "docker-in-docker": {} + "docker-in-docker": { + "version": "latest", + "moby": "false" + } }, "remoteUser": "vscode", "onCreateCommand": "docker ps && sleep 5s && docker ps" diff --git a/test/docker-in-docker/test.sh b/test/docker-in-docker/test.sh index 
e86a841c2..10a7232b6 100755 --- a/test/docker-in-docker/test.sh +++ b/test/docker-in-docker/test.sh @@ -12,6 +12,7 @@ check "docker-ps" bash -c "docker ps" check "log-exists" bash -c "ls /tmp/dockerd.log" check "log-for-completion" bash -c "cat /tmp/dockerd.log | grep 'Daemon has completed initialization'" check "log-contents" bash -c "cat /tmp/dockerd.log | grep 'API listen on /var/run/docker.sock'" +check "moby-buildx" bash -c "dpkg-query -W moby-buildx" # Report result reportResults \ No newline at end of file diff --git a/test/docker-outside-of-docker/docker_build_compose_fallback.sh b/test/docker-outside-of-docker/docker_build_compose_fallback.sh new file mode 100644 index 000000000..c1bdeb544 --- /dev/null +++ b/test/docker-outside-of-docker/docker_build_compose_fallback.sh @@ -0,0 +1,164 @@ +#!/bin/bash + +# Optional: Import test library +source dev-container-features-test-lib + +echo -e "\n👉 Checking version of compose-switch installed as docker-compose as installed by feature"; +check "installs compose-switch as docker-compose" bash -c "[[ -f /usr/local/bin/docker-compose ]]" + +trap 'echo "Last executed command failed at line ${LINENO}"' ERR + +# Fetch host/container arch. +architecture="$(dpkg --print-architecture)" + +sudo mkdir -p /usr/local/bin/docker-compose +sudo chmod +x /usr/local/bin/docker-compose + +apt_get_update() +{ + if [ "$(find /var/lib/apt/lists/* | wc -l)" = "0" ]; then + echo "Running apt-get update..." + apt-get update -y + fi +} + +# Checks if packages are installed and installs them if not +check_packages() { + if ! 
dpkg -s "$@" > /dev/null 2>&1; then + apt_get_update + apt-get -y install --no-install-recommends "$@" + fi +} + +# Figure out correct version of a three part version number is not passed +find_version_from_git_tags() { + local variable_name=$1 + local requested_version=${!variable_name} + if [ "${requested_version}" = "none" ]; then return; fi + local repository=$2 + local prefix=${3:-"tags/v"} + local separator=${4:-"."} + local last_part_optional=${5:-"false"} + if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then + local escaped_separator=${separator//./\\.} + local last_part + if [ "${last_part_optional}" = "true" ]; then + last_part="(${escaped_separator}[0-9]+)?" + else + last_part="${escaped_separator}[0-9]+" + fi + local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$" + local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)" + if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then + declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)" + else + set +e + declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")" + set -e + fi + fi + if [ -z "${!variable_name}" ] || ! echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then + echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2 + exit 1 + fi + echo "${variable_name}=${!variable_name}" +} + +# Use semver logic to decrement a version number then look for the closest match +find_prev_version_from_git_tags() { + local variable_name=$1 + local current_version=${!variable_name} + local repository=$2 + # Normally a "v" is used before the version number, but support alternate cases + local prefix=${3:-"tags/v"} + # Some repositories use "_" instead of "." 
for version number part separation, support that + local separator=${4:-"."} + # Some tools release versions that omit the last digit (e.g. go) + local last_part_optional=${5:-"false"} + # Some repositories may have tags that include a suffix (e.g. actions/node-versions) + local version_suffix_regex=$6 + # Try one break fix version number less if we get a failure. Use "set +e" since "set -e" can cause failures in valid scenarios. + set +e + major="$(echo "${current_version}" | grep -oE '^[0-9]+' || echo '')" + minor="$(echo "${current_version}" | grep -oP '^[0-9]+\.\K[0-9]+' || echo '')" + breakfix="$(echo "${current_version}" | grep -oP '^[0-9]+\.[0-9]+\.\K[0-9]+' 2>/dev/null || echo '')" + + if [ "${minor}" = "0" ] && [ "${breakfix}" = "0" ]; then + ((major=major-1)) + declare -g ${variable_name}="${major}" + # Look for latest version from previous major release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + # Handle situations like Go's odd version pattern where "0" releases omit the last part + elif [ "${breakfix}" = "" ] || [ "${breakfix}" = "0" ]; then + ((minor=minor-1)) + declare -g ${variable_name}="${major}.${minor}" + # Look for latest version from previous minor release + find_version_from_git_tags "${variable_name}" "${repository}" "${prefix}" "${separator}" "${last_part_optional}" + else + ((breakfix=breakfix-1)) + if [ "${breakfix}" = "0" ] && [ "${last_part_optional}" = "true" ]; then + declare -g ${variable_name}="${major}.${minor}" + else + declare -g ${variable_name}="${major}.${minor}.${breakfix}" + fi + fi + set -e +} + +# Function to fetch the version released prior to the latest version +get_previous_version() { + local url=$1 + local repo_url=$2 + local variable_name=$3 + local mode=$4 + prev_version=${!variable_name} + output=$(curl -s "$repo_url"); + check_packages jq + if echo "$output" | jq -e 'type == "object"' > /dev/null; then + message=$(echo "$output" | jq -r 
'.message') + if [[ $message == "API rate limit exceeded"* ]] || [[ $mode == 'mode1' ]]; then + echo -e "\nAn attempt to find previous to latest version using GitHub Api Failed... \nReason: ${message}" + echo -e "\nAttempting to find previous to latest version using GitHub tags." + find_prev_version_from_git_tags prev_version "$url" "tags/v" + declare -g ${variable_name}="${prev_version}" + fi + elif echo "$output" | jq -e 'type == "array"' > /dev/null; then + echo -e "\nAttempting to find previous version using GitHub Api." + version=$(echo "$output" | jq -r '.[1].tag_name') + declare -g ${variable_name}="${version#v}" + fi +} + +get_github_api_repo_url() { + local url=$1 + echo "${url/https:\/\/github.com/https:\/\/api.github.com\/repos}/releases" +} + +install_compose_switch_fallback() { + compose_switch_url=$1 + mode=$2 + repo_url=$(get_github_api_repo_url "${compose_switch_url}") + echo -e "\n(!) Failed to fetch the latest artifacts for compose-switch v${compose_switch_version}..." + get_previous_version "${compose_switch_url}" "${repo_url}" compose_switch_version $mode + echo -e "\nAttempting to install v${compose_switch_version}" + sudo curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${architecture}" -o /usr/local/bin/docker-compose +} + +install_compose-switch_as_docker-compose() { + mode=$1 + echo "(*) Installing compose-switch as docker-compose..." 
+ compose_switch_version="1.0.6" + compose_switch_url="https://github.com/docker/compose-switch" + sudo curl -fsSL "https://github.com/docker/compose-switch/releases/download/v${compose_switch_version}/docker-compose-linux-${architecture}" -o /usr/local/bin/docker-compose || install_compose_switch_fallback "${compose_switch_url}" $mode + sudo chmod +x /usr/local/bin/docker-compose +} + +echo -e "\n👉 Trying to install compose-switch as docker-compose using mode 1 ( find_prev_version_from_git_tags method )"; +install_compose-switch_as_docker-compose "mode1" +check "installs compose-switch as docker-compose mode 1" bash -c "[[ -f /usr/local/bin/docker-compose ]]" +check "docker-compose version" bash -c "docker-compose version" + +echo -e "\n👉 Trying to install compose-switch as docker-compose using mode 2 ( GitHub Api )"; +install_compose-switch_as_docker-compose "mode2" +check "installs compose-switch as docker-compose mode 2" bash -c "[[ -f /usr/local/bin/docker-compose ]]" +check "docker-compose version" bash -c "docker-compose version" \ No newline at end of file diff --git a/test/docker-outside-of-docker/docker_init.sh b/test/docker-outside-of-docker/docker_init.sh index b47285ead..88153c1e0 100644 --- a/test/docker-outside-of-docker/docker_init.sh +++ b/test/docker-outside-of-docker/docker_init.sh @@ -12,7 +12,8 @@ check "docker-compose" bash -c "docker-compose --version" check "docker-init-exists" bash -c "ls /usr/local/share/docker-init.sh" check "log-exists" bash -c "ls /tmp/vscr-docker-from-docker.log" check "log-contents-for-success" bash -c "cat /tmp/vscr-docker-from-docker.log | grep 'Success'" -check "log-contents" bash -c "cat /tmp/vscr-docker-from-docker.log | grep 'Proxying /var/run/docker-host.sock to /var/run/docker.sock for vscode'" + +check "log-contents" bash -c "cat /tmp/vscr-docker-from-docker.log | grep 'Ensuring vscode has access to /var/run/docker-host.sock via /var/run/docker.sock'" check "docker-ps" bash -c "docker ps >/dev/null" # Report 
result diff --git a/test/docker-outside-of-docker/docker_install_compose_switch.sh b/test/docker-outside-of-docker/docker_install_compose_switch.sh new file mode 100644 index 000000000..77f7bbef6 --- /dev/null +++ b/test/docker-outside-of-docker/docker_install_compose_switch.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Check if compose-switch is installed +check_compose_switch_installation() { + COMPOSE_SWITCH_BINARY="/usr/local/bin/compose-switch" + # Check if the binary exists + if [ ! -x "$COMPOSE_SWITCH_BINARY" ]; then + echo "compose-switch binary not found at $COMPOSE_SWITCH_BINARY" + exit 1 + else + compose_switch_version=$("$COMPOSE_SWITCH_BINARY" --version | awk '{print $4}') + if [ -z "$compose_switch_version" ]; then + echo "Unable to determine compose-switch version" + else + echo "compose-switch version: $compose_switch_version" + echo -e "\n✅ compose-switch is installed" + fi + fi +} + +check "Check whether compose-switch is installed" check_compose_switch_installation + +reportResults + \ No newline at end of file diff --git a/test/docker-outside-of-docker/docker_not_install_compose_switch.sh b/test/docker-outside-of-docker/docker_not_install_compose_switch.sh new file mode 100644 index 000000000..7160ef7ce --- /dev/null +++ b/test/docker-outside-of-docker/docker_not_install_compose_switch.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Check if compose-switch is installed +check_compose_switch_installation() { + COMPOSE_SWITCH_BINARY="/usr/local/bin/compose-switch" + # Check if the binary exists + if [ ! 
-x "$COMPOSE_SWITCH_BINARY" ]; then + echo "compose-switch binary not found at $COMPOSE_SWITCH_BINARY" + echo -e "\n❎ compose-switch is not installed" + else + compose_switch_version=$("$COMPOSE_SWITCH_BINARY" --version | awk '{print $4}') + if [ -z "$compose_switch_version" ]; then + echo "Unable to determine compose-switch version" + else + echo "compose-switch version: $compose_switch_version" + fi + exit 1 + fi +} + +check "Check whether compose-switch is installed" check_compose_switch_installation + +reportResults + \ No newline at end of file diff --git a/test/docker-outside-of-docker/docker_python_bookworm.sh b/test/docker-outside-of-docker/docker_python_bookworm.sh new file mode 100644 index 000000000..21ad3b7a4 --- /dev/null +++ b/test/docker-outside-of-docker/docker_python_bookworm.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib + +# Definition specific tests +check "docker-buildx" bash -c "docker buildx version" +check "docker-buildx-path" bash -c "ls -la /usr/libexec/docker/cli-plugins/docker-buildx" + +check "docker-buildx" docker buildx version +check "docker-build" docker build ./ + +check "installs docker-compose v2 install" bash -c "type docker-compose" +check "docker compose" bash -c "docker compose version | grep -E '2.[0-9]+.[0-9]+'" +check "docker-compose" bash -c "docker-compose --version | grep -E '2.[0-9]+.[0-9]+'" + +check "installs compose-switch as docker-compose" bash -c "[[ -f /usr/local/bin/docker-compose ]]" + +# Report result +reportResults \ No newline at end of file diff --git a/test/docker-outside-of-docker/docker_specific_moby_buildx.sh b/test/docker-outside-of-docker/docker_specific_moby_buildx.sh new file mode 100755 index 000000000..8428170e9 --- /dev/null +++ b/test/docker-outside-of-docker/docker_specific_moby_buildx.sh @@ -0,0 +1,19 @@ +#!/bin/bash + +set -e + +# Optional: Import test library +source dev-container-features-test-lib +# Definition specific 
tests +check "moby-buildx" bash -c "dpkg-query -W moby-buildx | grep -E '0.14.0'" + +check "docker-buildx" bash -c "docker buildx version" +check "docker-buildx-path" bash -c "ls -la /usr/libexec/docker/cli-plugins/docker-buildx" + +check "docker-buildx" docker buildx version +check "docker-build" docker build ./ + +check "installs docker-compose v1 install" bash -c "type docker-compose" + +# Report result +reportResults diff --git a/test/docker-outside-of-docker/install_moby_on_debian_trixie.sh b/test/docker-outside-of-docker/install_moby_on_debian_trixie.sh new file mode 120000 index 000000000..6c5c2a157 --- /dev/null +++ b/test/docker-outside-of-docker/install_moby_on_debian_trixie.sh @@ -0,0 +1 @@ +install_on_debian_trixie.sh \ No newline at end of file diff --git a/test/docker-outside-of-docker/install_on_debian_trixie.sh b/test/docker-outside-of-docker/install_on_debian_trixie.sh new file mode 100644 index 000000000..c6c679684 --- /dev/null +++ b/test/docker-outside-of-docker/install_on_debian_trixie.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +set -e + +# Import test library +source dev-container-features-test-lib + +# Definition specific tests +check "docker installed" bash -c "type docker" + +# Report results +reportResults diff --git a/test/docker-outside-of-docker/scenarios.json b/test/docker-outside-of-docker/scenarios.json index 3b82c6cc5..2163e7076 100644 --- a/test/docker-outside-of-docker/scenarios.json +++ b/test/docker-outside-of-docker/scenarios.json @@ -1,6 +1,16 @@ { + "docker_build_compose_fallback": { + "image": "mcr.microsoft.com/devcontainers/base:ubuntu-24.04", + "features": { + "docker-outside-of-docker": { + "moby": false, + "dockerDashComposeVersion": "latest" + } + }, + "containerUser": "vscode" + }, "docker_init_moby": { - "image": "mcr.microsoft.com/devcontainers/base:ubuntu-20.04", + "image": "mcr.microsoft.com/devcontainers/base:ubuntu-24.04", "features": { "docker-outside-of-docker": { "moby": true @@ -9,7 +19,7 @@ "containerUser": 
"vscode" }, "docker_init": { - "image": "mcr.microsoft.com/devcontainers/base:ubuntu-20.04", + "image": "mcr.microsoft.com/devcontainers/base:ubuntu-24.04", "features": { "docker-outside-of-docker": { "moby": false @@ -36,7 +46,7 @@ "containerUser": "vscode" }, "docker_build": { - "image": "mcr.microsoft.com/devcontainers/base:ubuntu-20.04", + "image": "mcr.microsoft.com/devcontainers/base:ubuntu-24.04", "features": { "docker-outside-of-docker": { "moby": false, @@ -46,7 +56,7 @@ "containerUser": "vscode" }, "docker_build_moby": { - "image": "mcr.microsoft.com/devcontainers/base:ubuntu-20.04", + "image": "mcr.microsoft.com/devcontainers/base:ubuntu-24.04", "features": { "docker-outside-of-docker": { "moby": true @@ -55,10 +65,10 @@ "containerUser": "vscode" }, "docker_build_no_buildx": { - "image": "mcr.microsoft.com/devcontainers/base:ubuntu-20.04", + "image": "mcr.microsoft.com/devcontainers/base:ubuntu-24.04", "features": { "docker-outside-of-docker": { - "version": "20", + "version": "26", "moby": false, "installDockerBuildx": false } @@ -66,10 +76,10 @@ "containerUser": "vscode" }, "docker_build_no_buildx_moby": { - "image": "mcr.microsoft.com/devcontainers/base:ubuntu-20.04", + "image": "mcr.microsoft.com/devcontainers/base:ubuntu-24.04", "features": { "docker-outside-of-docker": { - "version": "20", + "version": "26", "moby": true, "installDockerBuildx": false } @@ -77,7 +87,7 @@ "containerUser": "vscode" }, "docker_dash_compose_v1": { - "image": "mcr.microsoft.com/devcontainers/base:ubuntu-20.04", + "image": "mcr.microsoft.com/devcontainers/base:ubuntu-24.04", "features": { "docker-outside-of-docker": { "moby": false, @@ -87,7 +97,7 @@ "containerUser": "vscode" }, "docker_dash_compose_v1_moby": { - "image": "mcr.microsoft.com/devcontainers/base:ubuntu-20.04", + "image": "mcr.microsoft.com/devcontainers/base:ubuntu-24.04", "features": { "docker-outside-of-docker": { "moby": true, @@ -97,7 +107,7 @@ "containerUser": "vscode" }, "docker_dash_compose_v2": { - 
"image": "mcr.microsoft.com/devcontainers/base:ubuntu-20.04", + "image": "mcr.microsoft.com/devcontainers/base:ubuntu-24.04", "features": { "docker-outside-of-docker": { "moby": false, @@ -107,7 +117,7 @@ "containerUser": "vscode" }, "docker_dash_compose_v2_moby": { - "image": "mcr.microsoft.com/devcontainers/base:ubuntu-20.04", + "image": "mcr.microsoft.com/devcontainers/base:ubuntu-24.04", "features": { "docker-outside-of-docker": { "moby": true, @@ -124,5 +134,51 @@ } }, "remoteUser": "node" + }, + "docker_specific_moby_buildx": { + "image": "ubuntu:noble", + "features": { + "docker-outside-of-docker": { + "mobyBuildxVersion": "0.14.0" + } + } + }, + "docker_python_bookworm": { + "image": "mcr.microsoft.com/devcontainers/base:bookworm", + "features": { + "docker-outside-of-docker": { + "moby": true, + "installDockerBuildx": true, + "dockerDashComposeVersion": "v2" + } + } + }, + "docker_not_install_compose_switch": { + "image": "mcr.microsoft.com/devcontainers/base:ubuntu-24.04", + "features": { + "docker-outside-of-docker": { + "dockerDashComposeVersion": "latest", + "installDockerComposeSwitch": false + } + }, + "containerUser": "vscode" + }, + "docker_install_compose_switch": { + "image": "mcr.microsoft.com/devcontainers/base:ubuntu-24.04", + "features": { + "docker-outside-of-docker": { + "dockerDashComposeVersion": "latest", + "installDockerComposeSwitch": true + } + }, + "containerUser": "vscode" + }, + "install_on_debian_trixie": { + "image": "debian:trixie", + "features": { + "docker-outside-of-docker": { + "moby": false + } + } } } diff --git a/test/docker-outside-of-docker/test.sh b/test/docker-outside-of-docker/test.sh index fe9098e10..5206f5977 100644 --- a/test/docker-outside-of-docker/test.sh +++ b/test/docker-outside-of-docker/test.sh @@ -10,6 +10,7 @@ check "docker compose" bash -c "docker compose version" check "docker-compose" bash -c "docker-compose --version" check "docker-ps" bash -c "docker ps >/dev/null" +check "moby-buildx" bash -c 
"dpkg-query -W moby-buildx" # Report result reportResults \ No newline at end of file diff --git a/test/dotnet/dotnet_helpers.sh b/test/dotnet/dotnet_helpers.sh index 6c833b444..01e554f66 100644 --- a/test/dotnet/dotnet_helpers.sh +++ b/test/dotnet/dotnet_helpers.sh @@ -9,13 +9,12 @@ fetch_latest_version_in_channel() { local channel="$1" local runtime="$2" if [ "$runtime" = "dotnet" ]; then - wget -qO- "https://dotnetcli.azureedge.net/dotnet/Runtime/$channel/latest.version" + wget -qO- "https://builds.dotnet.microsoft.com/dotnet/Runtime/$channel/latest.version" elif [ "$runtime" = "aspnetcore" ]; then - wget -qO- "https://dotnetcli.azureedge.net/dotnet/aspnetcore/Runtime/$channel/latest.version" + wget -qO- "https://builds.dotnet.microsoft.com/dotnet/aspnetcore/Runtime/$channel/latest.version" else - wget -qO- "https://dotnetcli.azureedge.net/dotnet/Sdk/$channel/latest.version" + wget -qO- "https://builds.dotnet.microsoft.com/dotnet/Sdk/$channel/latest.version" fi - } # Prints the latest dotnet version @@ -47,7 +46,6 @@ is_dotnet_sdk_version_installed() { return $? } - # Asserts that the specified .NET Runtime version is installed # Returns a non-zero exit code if the check fails # Usage: is_dotnet_runtime_version_installed @@ -68,4 +66,14 @@ is_aspnetcore_runtime_version_installed() { local expected="$1" dotnet --list-runtimes | grep --fixed-strings --silent "Microsoft.AspNetCore.App $expected" return $? -} \ No newline at end of file +} + +# Asserts that the specified workload is installed +# Returns a non-zero exit code if the check fails +# Usage: is_dotnet_workload_installed +# Example: is_dotnet_workload_installed "aspire" +is_dotnet_workload_installed() { + local expected="$1" + dotnet workload list | grep --fixed-strings --silent "$expected" + return $? 
+} diff --git a/test/dotnet/install_aspnetcore_runtime_only.sh b/test/dotnet/install_aspnetcore_runtime_only.sh index 35bd540b7..78e4f099c 100644 --- a/test/dotnet/install_aspnetcore_runtime_only.sh +++ b/test/dotnet/install_aspnetcore_runtime_only.sh @@ -18,10 +18,6 @@ expected=$(fetch_latest_version "aspnetcore") check "Latest ASP.NET Core Runtime version installed" \ is_aspnetcore_runtime_version_installed "$expected" -# Expect this check to fail in November 2023 when .NET 8.0 becomes GA -check "It is a flavor of .NET 7.0" \ -is_aspnetcore_runtime_version_installed "7.0" - # Report results # If any of the checks above exited with a non-zero exit code, the test will fail. reportResults \ No newline at end of file diff --git a/test/dotnet/install_dotnet_daily.sh b/test/dotnet/install_dotnet_daily.sh new file mode 100644 index 000000000..52f4f8d1b --- /dev/null +++ b/test/dotnet/install_dotnet_daily.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -e + +source dev-container-features-test-lib +source dotnet_env.sh +source dotnet_helpers.sh + +# Verify 10.0 SDK (any prerelease containing '10.0') is installed +check ".NET SDK 10.0 installed" \ +is_dotnet_sdk_version_installed "10.0" + +check ".NET Runtime 10.0 installed" \ +is_dotnet_runtime_version_installed "10.0" + +check "ASP.NET Core Runtime 10.0 installed" \ +is_aspnetcore_runtime_version_installed "10.0" + +check "Build and run .NET 10.0 project" \ +dotnet run --project projects/net10.0 + +reportResults diff --git a/test/dotnet/install_dotnet_latest_when_version_is_empty.sh b/test/dotnet/install_dotnet_latest_when_version_is_empty.sh index 7418892e1..b28c45a2f 100644 --- a/test/dotnet/install_dotnet_latest_when_version_is_empty.sh +++ b/test/dotnet/install_dotnet_latest_when_version_is_empty.sh @@ -18,13 +18,6 @@ expected=$(fetch_latest_version) check "Latest .NET SDK version installed" \ is_dotnet_sdk_version_installed "$expected" -# Expect this check to fail in November 2023 when .NET 8.0 becomes GA -check "It is a 
flavor of .NET 7.0" \ -is_dotnet_sdk_version_installed "7.0" - -check "Build and run example project" \ -dotnet run --project projects/net7.0 - # Report results # If any of the checks above exited with a non-zero exit code, the test will fail. reportResults \ No newline at end of file diff --git a/test/dotnet/install_dotnet_lts.sh b/test/dotnet/install_dotnet_lts.sh index fe37b89f7..da9175c15 100644 --- a/test/dotnet/install_dotnet_lts.sh +++ b/test/dotnet/install_dotnet_lts.sh @@ -18,9 +18,6 @@ expected=$(fetch_latest_version_in_channel "LTS") check "Latest LTS version installed" \ is_dotnet_sdk_version_installed "$expected" -check "Build and run example project" \ -dotnet run --project projects/net6.0 - # Report results # If any of the checks above exited with a non-zero exit code, the test will fail. reportResults \ No newline at end of file diff --git a/test/dotnet/install_dotnet_multiple_versions.sh b/test/dotnet/install_dotnet_multiple_versions.sh index 5f0f12533..482c307b9 100644 --- a/test/dotnet/install_dotnet_multiple_versions.sh +++ b/test/dotnet/install_dotnet_multiple_versions.sh @@ -13,18 +13,33 @@ source dev-container-features-test-lib source dotnet_env.sh source dotnet_helpers.sh +check ".NET SDK 9.0 installed" \ +is_dotnet_sdk_version_installed "9.0" + check ".NET SDK 8.0 installed" \ is_dotnet_sdk_version_installed "8.0" check ".NET SDK 7.0 installed" \ is_dotnet_sdk_version_installed "7.0" -check ".NET SDK 6.0 installed" \ -is_dotnet_sdk_version_installed "6.0" +check ".NET SDK 10.0 installed" \ +is_dotnet_sdk_version_installed "10.0" check "Build example class library" \ dotnet build projects/multitargeting +check "Build and run .NET 9.0 project" \ +dotnet run --project projects/net9.0 + +check "Build and run .NET 8.0 project" \ +dotnet run --project projects/net8.0 + +check "Build and run .NET 7.0 project" \ +dotnet run --project projects/net7.0 + +check "Build and run .NET 10.0 project" \ +dotnet run --project projects/net10.0 + # Report 
results # If any of the checks above exited with a non-zero exit code, the test will fail. reportResults \ No newline at end of file diff --git a/test/dotnet/install_dotnet_preview.sh b/test/dotnet/install_dotnet_preview.sh new file mode 100644 index 000000000..52f4f8d1b --- /dev/null +++ b/test/dotnet/install_dotnet_preview.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -e + +source dev-container-features-test-lib +source dotnet_env.sh +source dotnet_helpers.sh + +# Verify 10.0 SDK (any prerelease containing '10.0') is installed +check ".NET SDK 10.0 installed" \ +is_dotnet_sdk_version_installed "10.0" + +check ".NET Runtime 10.0 installed" \ +is_dotnet_runtime_version_installed "10.0" + +check "ASP.NET Core Runtime 10.0 installed" \ +is_aspnetcore_runtime_version_installed "10.0" + +check "Build and run .NET 10.0 project" \ +dotnet run --project projects/net10.0 + +reportResults diff --git a/test/dotnet/install_dotnet_runtime_only.sh b/test/dotnet/install_dotnet_runtime_only.sh index 65188c5ec..3c58340d1 100644 --- a/test/dotnet/install_dotnet_runtime_only.sh +++ b/test/dotnet/install_dotnet_runtime_only.sh @@ -18,10 +18,6 @@ expected=$(fetch_latest_version "dotnet") check "Latest .NET Runtime version installed" \ is_dotnet_runtime_version_installed "$expected" -# Expect this check to fail in November 2023 when .NET 8.0 becomes GA -check "It is a flavor of .NET 7.0" \ -is_dotnet_runtime_version_installed "7.0" - # Report results # If any of the checks above exited with a non-zero exit code, the test will fail. 
reportResults \ No newline at end of file diff --git a/test/dotnet/install_dotnet_specific_release.sh b/test/dotnet/install_dotnet_specific_release.sh index 207e58280..0d01bb4da 100644 --- a/test/dotnet/install_dotnet_specific_release.sh +++ b/test/dotnet/install_dotnet_specific_release.sh @@ -13,13 +13,13 @@ source dev-container-features-test-lib source dotnet_env.sh source dotnet_helpers.sh -expected=$(fetch_latest_version_in_channel "3.1") +expected=$(fetch_latest_version_in_channel "8.0") -check ".NET Core SDK 3.1 installed" \ +check ".NET Core SDK 8.0 installed" \ is_dotnet_sdk_version_installed "$expected" check "Build and run example project" \ -dotnet run --project projects/netcoreapp3.1 +dotnet run --project projects/net8.0 # Report results # If any of the checks above exited with a non-zero exit code, the test will fail. diff --git a/test/dotnet/install_dotnet_specific_release_and_feature_band.sh b/test/dotnet/install_dotnet_specific_release_and_feature_band.sh index 51f5c582b..9820217f2 100644 --- a/test/dotnet/install_dotnet_specific_release_and_feature_band.sh +++ b/test/dotnet/install_dotnet_specific_release_and_feature_band.sh @@ -13,11 +13,11 @@ source dev-container-features-test-lib source dotnet_env.sh source dotnet_helpers.sh -check ".NET SDK 5.0.3xx installed" \ -is_dotnet_sdk_version_installed "5.0.3" +check ".NET SDK 8.0.3xx installed" \ +is_dotnet_sdk_version_installed "8.0.3" check "Build and run example project" \ -dotnet run --project projects/net5.0 +dotnet run --project projects/net8.0 # Report results # If any of the checks above exited with a non-zero exit code, the test will fail. 
diff --git a/test/dotnet/install_dotnet_workloads.sh b/test/dotnet/install_dotnet_workloads.sh new file mode 100644 index 000000000..37c86a2d4 --- /dev/null +++ b/test/dotnet/install_dotnet_workloads.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +set -e + +# Optional: Import test library bundled with the devcontainer CLI +# See https://github.com/devcontainers/cli/blob/HEAD/docs/features/test.md#dev-container-features-test-lib +# Provides the 'check' and 'reportResults' commands. +source dev-container-features-test-lib + +# Feature-specific tests +# The 'check' command comes from the dev-container-features-test-lib. Syntax is... +# check