91 changes: 61 additions & 30 deletions scripts/dev/configure_container_auth.sh
@@ -10,6 +10,11 @@ source scripts/funcs/kubernetes

CONTAINER_RUNTIME="${CONTAINER_RUNTIME-"docker"}"

# Registry URLs
ECR_EU_WEST="268558157000.dkr.ecr.eu-west-1.amazonaws.com"
ECR_US_EAST="268558157000.dkr.ecr.us-east-1.amazonaws.com"
ECR_SEARCH_US_EAST="901841024863.dkr.ecr.us-east-1.amazonaws.com"

setup_validate_container_runtime() {
case "${CONTAINER_RUNTIME}" in
"podman")
@@ -104,26 +109,44 @@ if [[ ! -f "${CONFIG_PATH}" ]]; then
write_file '{}' "${CONFIG_PATH}"
fi

if [[ -f "${CONFIG_PATH}" ]]; then
if [[ "${RUNNING_IN_EVG:-"false"}" != "true" ]]; then
echo "Checking if container registry credentials are valid..."
ecr_auth=$(exec_cmd jq -r '.auths."268558157000.dkr.ecr.us-east-1.amazonaws.com".auth // empty' "${CONFIG_PATH}")
check_if_login_required() {
echo "Checking if container registry credentials are valid..."

check_registry_credentials() {
registry_url=$1
image_path=$1
image_tag=$2
# shellcheck disable=SC2016
ecr_auth=$(exec_cmd jq -r --arg registry "${registry_url}" '.auths.[$registry].auth // empty' "${CONFIG_PATH}")

if [[ -n "${ecr_auth}" ]]; then
http_status=$(curl --head -s -o /dev/null -w "%{http_code}" --max-time 3 "https://268558157000.dkr.ecr.us-east-1.amazonaws.com/v2/dev/mongodb-kubernetes/manifests/latest" \
http_status=$(curl --head -s -o /dev/null -w "%{http_code}" --max-time 3 "https://${registry_url}/v2/${image_path}/manifests/${image_tag}" \
-H "Authorization: Basic ${ecr_auth}" 2>/dev/null || echo "error/timeout")

if [[ "${http_status}" != "401" && "${http_status}" != "403" && "${http_status}" != "error/timeout" ]]; then
echo "Container registry credentials are up to date - not performing the new login!"
exit
return 0
fi
echo "Container login required (HTTP status: ${http_status})"
echo -e "${RED}Container login required (HTTP status: ${http_status})${NO_COLOR}"
else
echo "No ECR credentials found in container config - login required"
echo -e "${RED}No ECR credentials found in container config - login required${NO_COLOR}"
fi

return 0
}

check_registry_credentials "${ECR_EU_WEST}" "mongot/community" "1.47.0" | prepend "${ECR_EU_WEST}" || return 1
check_registry_credentials "${ECR_US_EAST}" "dev/mongodb-kubernetes" "latest" | prepend "${ECR_US_EAST}" || return 1
if [[ "${MDB_SEARCH_AWS_SSO_LOGIN:-"false"}" == "true" ]]; then
check_registry_credentials "${ECR_SEARCH_US_EAST}" "mongot-community/rapid-releases" "latest" | prepend "${ECR_SEARCH_US_EAST}" || return 1
fi

return 0
}

login_to_registries() {
title "Performing container login to ECR registries"
echo "$(aws --version)}"

# There could be some leftovers on Evergreen (Docker-specific, skip for Podman)
if [[ "${CONTAINER_RUNTIME}" == "docker" ]]; then
@@ -134,34 +157,42 @@ if [[ -f "${CONFIG_PATH}" ]]; then
remove_element "credHelpers"
fi
fi
fi

aws ecr get-login-password --region "us-east-1" | registry_login "AWS" "${ECR_US_EAST}"

echo "$(aws --version)}"
if [[ "${MDB_SEARCH_AWS_SSO_LOGIN:"-false"}" == "true" ]]; then
aws sso login --profile devprod-platforms-ecr-user
aws --profile devprod-platforms-ecr-user ecr get-login-password --region us-east-1 | registry_login "AWS" "${ECR_SEARCH_US_EAST}"
fi

aws ecr get-login-password --region "us-east-1" | registry_login "AWS" "268558157000.dkr.ecr.us-east-1.amazonaws.com"
# by default docker tries to store credentials in an external storage (e.g. OS keychain) - not in the config.json
# We need to store it as base64 string in config.json instead so we need to remove the "credsStore" element
# This is Docker-specific behavior, Podman stores credentials directly in auth.json
if [[ "${CONTAINER_RUNTIME}" == "docker" ]] && exec_cmd grep -q "credsStore" "${CONFIG_PATH}"; then
remove_element "credsStore"

# by default docker tries to store credentials in an external storage (e.g. OS keychain) - not in the config.json
# We need to store it as base64 string in config.json instead so we need to remove the "credsStore" element
# This is Docker-specific behavior, Podman stores credentials directly in auth.json
if [[ "${CONTAINER_RUNTIME}" == "docker" ]] && exec_cmd grep -q "credsStore" "${CONFIG_PATH}"; then
remove_element "credsStore"
# login again to store the credentials into the config.json
aws ecr get-login-password --region "us-east-1" | registry_login "AWS" "${ECR_US_EAST}"
fi

# login again to store the credentials into the config.json
aws ecr get-login-password --region "us-east-1" | registry_login "AWS" "268558157000.dkr.ecr.us-east-1.amazonaws.com"
fi
aws ecr get-login-password --region "eu-west-1" | registry_login "AWS" "${ECR_EU_WEST}"

if [[ -n "${PRERELEASE_PULLSECRET_DOCKERCONFIGJSON:-}" ]]; then
# log in to quay.io for the mongodb/mongodb-search-community private repo
# TODO remove once we switch to the official repo in Public Preview
quay_io_auth_file=$(mktemp)
config_tmp=$(mktemp)
echo "${PRERELEASE_PULLSECRET_DOCKERCONFIGJSON}" | base64 -d > "${quay_io_auth_file}"
exec_cmd jq -s '.[0] * .[1]' "${quay_io_auth_file}" "${CONFIG_PATH}" > "${config_tmp}"
exec_cmd mv "${config_tmp}" "${CONFIG_PATH}"
rm "${quay_io_auth_file}"
fi
}

aws ecr get-login-password --region "eu-west-1" | registry_login "AWS" "268558157000.dkr.ecr.eu-west-1.amazonaws.com"

if [[ -n "${PRERELEASE_PULLSECRET_DOCKERCONFIGJSON:-}" ]]; then
# log in to quay.io for the mongodb/mongodb-search-community private repo
# TODO remove once we switch to the official repo in Public Preview
quay_io_auth_file=$(mktemp)
config_tmp=$(mktemp)
echo "${PRERELEASE_PULLSECRET_DOCKERCONFIGJSON}" | base64 -d > "${quay_io_auth_file}"
exec_cmd jq -s '.[0] * .[1]' "${quay_io_auth_file}" "${CONFIG_PATH}" > "${config_tmp}"
exec_cmd mv "${config_tmp}" "${CONFIG_PATH}"
rm "${quay_io_auth_file}"
if [[ "${RUNNING_IN_EVG:-"false"}" != "true" ]]; then
check_if_login_required
else
login_to_registries
fi

create_image_registries_secret
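
Side note on check_registry_credentials above: the same probe can be run by hand to see whether the cached ECR token in the container config is still accepted. A minimal standalone sketch (registry, repository and tag are illustrative values matching the defaults used in this script):

config_path="${HOME}/.docker/config.json"
registry="268558157000.dkr.ecr.us-east-1.amazonaws.com"
repo="dev/mongodb-kubernetes"
tag="latest"
# read the cached Basic auth entry for the registry; empty if nothing is stored
auth=$(jq -r --arg r "${registry}" '.auths[$r].auth // empty' "${config_path}")
# probe the Docker Registry v2 manifests endpoint with the cached token;
# 401/403 (or a timeout) means a fresh "aws ecr get-login-password" login is needed
curl --head -s -o /dev/null -w "%{http_code}\n" --max-time 3 \
  -H "Authorization: Basic ${auth}" \
  "https://${registry}/v2/${repo}/manifests/${tag}"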
4 changes: 4 additions & 0 deletions scripts/dev/contexts/private-context-template
@@ -101,3 +101,7 @@ export PRERELEASE_PULLSECRET_DOCKERCONFIGJSON="<dockerconfigjson secret>"

# uncomment to enable license update with pre-commit script
# export MDB_UPDATE_LICENSES=true

# enable only for getting mongodb search rapid-releases from
# 901841024863.dkr.ecr.us-east-1.amazonaws.com/mongot-community/rapid-releases
export MDB_SEARCH_AWS_SSO_LOGIN="false"
203 changes: 203 additions & 0 deletions scripts/dev/publish_artifacts_to_staging_quay.sh
@@ -0,0 +1,203 @@
#!/bin/bash
#
# Script to publish (mirror) container images and helm charts to staging registry.
#

set -euo pipefail

test "${MDB_BASH_DEBUG:-0}" -eq 1 && set -x

source scripts/dev/set_env_context.sh
source scripts/funcs/printing

BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev"
STAGING_BASE_URL="quay.io/mongodb/staging"

if [[ $# -lt 2 || $# -gt 3 ]]; then
echo "The tool mirrors images built for a given evg patch id (or latest) from ${BASE_REPO_URL} to ${STAGING_BASE_URL}"
echo "It publishes a helm OCI image of the helm chart with chart version \"<prerelease helm version>-<version_id>\""
echo "Usage: $0 <prerelease helm version> <version_id> [<search version, default: latest>]"
echo "Example: $0 1.4.0-prerelease 68b1a853973bae0007d5eaa0"
echo ""
exit 1
fi

helm_chart_version_prefix="$1"
operator_version="$2"
search_version=${3:-"latest"}

helm_chart_version="${helm_chart_version_prefix}-${operator_version}"

get_arch_digest() {
local image="$1"
local arch="$2"
local manifest_json
manifest_json=$(docker buildx imagetools inspect --raw "${image}" 2>/dev/null || echo '{}')

local media_type
media_type=$(echo "$manifest_json" | jq -r '.mediaType // empty')

if [[ "$media_type" == *"manifest.list"* ]]; then
# this is a multi-arch manifest
local arch_digest
arch_digest=$(echo "$manifest_json" | jq -r ".manifests[] | select(.platform.architecture == \"${arch}\" and .platform.os == \"linux\") | .digest")

if [[ -n "$arch_digest" && "$arch_digest" != "null" ]]; then
echo "$arch_digest"
return 0
fi
elif [[ "${arch}" == "amd64" ]]; then
# otherwise it must be a single-arch (image) manifest, so we return it only if we ask for amd64
local arch_digest
arch_digest="sha256:$(echo -n "$manifest_json" | sha256)"
echo "$arch_digest"
fi

echo ""
return 0
}

process_image() {
local source_image="$1"
local target_image="$2"

echo " Processing ${source_image}..."

local digest_arm64
local digest_amd64
digest_arm64=$(get_arch_digest "${source_image}" arm64)
digest_amd64=$(get_arch_digest "${source_image}" amd64)

if [[ -n "${digest_amd64}" ]]; then
docker pull "${source_image}@${digest_amd64}"
docker tag "${source_image}@${digest_amd64}" "${target_image}-amd64"
docker push "${target_image}-amd64"
fi

if [[ -n "${digest_arm64}" ]]; then
docker pull "${source_image}@${digest_arm64}"
docker tag "${source_image}@${digest_arm64}" "${target_image}-arm64"
docker push "${target_image}-arm64"
fi

docker manifest create "${target_image}" ${digest_amd64:+--amend ${target_image}-amd64} ${digest_arm64:+--amend ${target_image}-arm64}
docker manifest push "${target_image}"
}

publish_images() {
local names=()
local sources=()
local destinations=()

operator_images=(
"mongodb-kubernetes"
"mongodb-kubernetes-database"
"mongodb-kubernetes-init-appdb"
"mongodb-kubernetes-init-database"
"mongodb-kubernetes-init-ops-manager"
"mongodb-kubernetes-readinessprobe"
"mongodb-kubernetes-operator-version-upgrade-post-start-hook"
)

if [[ -n "${search_version}" ]]; then
names+=("mongodb-search")
sources+=("901841024863.dkr.ecr.us-east-1.amazonaws.com/mongot-community/rapid-releases:${search_version}")
destinations+=("${STAGING_BASE_URL}/mongodb-search:${search_version}")
fi

for image in "${operator_images[@]}"; do
names+=("${image}")
sources+=("${BASE_REPO_URL}/${image}:${operator_version}")
destinations+=("${STAGING_BASE_URL}/${image}:${helm_chart_version}")
done

echo "Starting Docker image re-tagging and publishing to staging..."
echo "Version ID: ${operator_version}"
echo "Source repository: ${BASE_REPO_URL}"
echo "Target repository: ${STAGING_BASE_URL}"
echo ""

for i in "${!names[@]}"; do
process_image "${sources[$i]}" "${destinations[$i]}"
done

echo "=== SUMMARY ==="
echo "All images have been successfully re-tagged and pushed to staging!"
echo ""
echo "Images processed:"
for i in "${!names[@]}"; do
echo " ${names[$i]}: ${sources[$i]} -> ${destinations[$i]}"
done
}

update_helm_values() {
scripts/evergreen/release/update_helm_values_files.py
yq eval ".version = \"${helm_chart_version}\"" -i helm_chart/Chart.yaml
echo "Updated helm_chart/Chart.yaml version to: ${helm_chart_version}"

yq eval \
".registry.operator = \"${STAGING_BASE_URL}\" |
.registry.database = \"${STAGING_BASE_URL}\" |
.registry.initDatabase = \"${STAGING_BASE_URL}\" |
.registry.initOpsManager = \"${STAGING_BASE_URL}\" |
.registry.initAppDb = \"${STAGING_BASE_URL}\" |
.registry.appDb = \"${STAGING_BASE_URL}\" |
.registry.versionUpgradeHook = \"${STAGING_BASE_URL}\" |
.registry.readinessProbe = \"${STAGING_BASE_URL}\"
" -i helm_chart/values.yaml
echo "Updated helm_chart/values.yaml registry to: ${STAGING_BASE_URL}"
}

prepare_helm_oci_image() {
mkdir -p tmp
helm package helm_chart -d tmp/
}

push_helm_oci_image() {
export HELM_REGISTRY_CONFIG=~/.docker/config.json
helm push "tmp/mongodb-kubernetes-${helm_chart_version}.tgz" "oci://${STAGING_BASE_URL}/helm-chart"
}

update_release_json() {
if [[ ! -f "release.json" ]]; then
echo "Error: release.json file not found"
exit 1
fi

echo "Updating release.json with versions..."

# Update operator and init versions
jq --arg version "${helm_chart_version}" \
--arg registry "${STAGING_BASE_URL}" \
'.mongodbOperator = $version |
.initDatabaseVersion = $version |
.initOpsManagerVersion = $version |
.initAppDbVersion = $version |
.databaseImageVersion = $version' \
release.json > release.json.tmp && mv release.json.tmp release.json

# Update search community version
jq --arg searchVersion "${search_version}" \
--arg searchRepo "${STAGING_BASE_URL}" \
--arg searchImageName "mongodb-search" \
'.search.community.repo = $searchRepo |
.search.community.name = $searchImageName |
.search.community.version = $searchVersion' \
release.json > release.json.tmp && mv release.json.tmp release.json

echo "Updated release.json with:"
echo " - Operator versions: ${helm_chart_version}"
echo " - Search community version: ${MDB_SEARCH_COMMUNITY_VERSION}"
}

revert_changes_to_local_files() {
echo "Reverting generated/updated files: helm_chart/ public/ config/ release.json"
git checkout -- helm_chart/ public/ config/ release.json
}

publish_images
update_release_json
update_helm_values
prepare_helm_oci_image
push_helm_oci_image
revert_changes_to_local_files
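
As a quick sanity check after the script finishes (image name and tag below are just the values from the usage example above, not output of the script), the pushed manifest list on staging can be inspected to confirm it carries both architecture entries created by process_image:

# prints the amd64/arm64 platform entries of the pushed manifest list
docker buildx imagetools inspect \
  "quay.io/mongodb/staging/mongodb-kubernetes:1.4.0-prerelease-68b1a853973bae0007d5eaa0"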
6 changes: 6 additions & 0 deletions scripts/evergreen/release/update_helm_values_files.py
@@ -89,6 +89,12 @@ def update_helm_charts(operator_version, release):
set_value_in_yaml_file(
"helm_chart/values.yaml", "search.community.version", release["search"]["community"]["version"]
)
set_value_in_yaml_file(
"helm_chart/values.yaml", "search.community.repo", release["search"]["community"]["repo"]
)
set_value_in_yaml_file(
"helm_chart/values.yaml", "search.community.name", release["search"]["community"]["name"]
)


def update_cluster_service_version(operator_version):
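
For completeness, one way to verify that the new repo/name keys actually land in the chart after update_helm_charts runs (the yq invocation mirrors the ones already used in publish_artifacts_to_staging_quay.sh; the printed values depend on release.json, so treat the output as illustrative):

# prints repo, name and version under search.community in the rendered values file
yq eval '.search.community' helm_chart/values.yaml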