From b341dc3fefcefcefc20e91c6062652c79b9ce1dd Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Tue, 29 Jul 2025 09:58:29 +0200 Subject: [PATCH 001/164] Dockerfiles --- docker/mongodb-agent-non-matrix/README.md | 2 +- docker/mongodb-agent/Dockerfile | 54 +++++++++++++++---- docker/mongodb-agent/README.md | 27 ++++++---- docker/mongodb-kubernetes-database/Dockerfile | 3 +- docker/mongodb-kubernetes-database/README.md | 7 ++- .../mongodb-kubernetes-init-appdb/Dockerfile | 29 +++++++--- .../mongodb-kubernetes-init-appdb/README.md | 13 +++-- .../Dockerfile | 31 ++++++++--- .../README.md | 15 ++++-- .../content/agent-launcher-lib.sh | 21 ++++++-- docker/mongodb-kubernetes-operator/Dockerfile | 9 ++-- docker/mongodb-kubernetes-operator/README.md | 7 ++- .../Dockerfile | 5 +- .../Dockerfile | 5 +- 14 files changed, 167 insertions(+), 61 deletions(-) diff --git a/docker/mongodb-agent-non-matrix/README.md b/docker/mongodb-agent-non-matrix/README.md index 79dc0d2d5..b51bef808 100644 --- a/docker/mongodb-agent-non-matrix/README.md +++ b/docker/mongodb-agent-non-matrix/README.md @@ -8,7 +8,7 @@ AGENT_VERSION="108.0.7.8810-1" TOOLS_VERSION="100.12.0" AGENT_DISTRO="rhel9_x86_64" TOOLS_DISTRO="rhel93-x86_64" -docker buildx build --load --progress plain . -f docker/mongodb-agent/Dockerfile -t "mongodb-agent:${AGENT_VERSION}" \ +docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . -f docker/mongodb-agent/Dockerfile -t "mongodb-agent:${AGENT_VERSION}" \ --build-arg version="${VERSION}" \ --build-arg agent_version="${AGENT_VERSION}" \ --build-arg tools_version="${TOOLS_VERSION}" \ diff --git a/docker/mongodb-agent/Dockerfile b/docker/mongodb-agent/Dockerfile index 5ec4e127b..a9a910e56 100644 --- a/docker/mongodb-agent/Dockerfile +++ b/docker/mongodb-agent/Dockerfile @@ -1,9 +1,43 @@ # the init database image gets supplied by pipeline.py and corresponds to the operator version we want to release # the agent with. This enables us to release the agent for older operator. 
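# NOTE: the scratch-based tools_downloader and agent_downloader stages below pre-fetch
# the per-architecture tools/agent tarballs on the build platform; the final stage
# copies only the pair matching ${TARGETARCH}.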
ARG init_database_image + +FROM --platform=${BUILDPLATFORM} scratch AS tools_downloader + +ARG mongodb_tools_url + +ARG mongodb_tools_version_amd64 +ADD "${mongodb_tools_url}/${mongodb_tools_version_amd64}" /data/amd64/mongodb_tools.tgz + +ARG mongodb_tools_version_arm64 +ADD "${mongodb_tools_url}/${mongodb_tools_version_arm64}" /data/arm64/mongodb_tools.tgz + +ARG mongodb_tools_version_s390x +ADD "${mongodb_tools_url}/${mongodb_tools_version_s390x}" /data/s390x/mongodb_tools.tgz + +ARG mongodb_tools_version_ppc64le +ADD "${mongodb_tools_url}/${mongodb_tools_version_ppc64le}" /data/ppc64le/mongodb_tools.tgz + +FROM --platform=${BUILDPLATFORM} scratch AS agent_downloader + +ARG mongodb_agent_url + +ARG mongodb_agent_version_amd64 +ADD "${mongodb_agent_url}/${mongodb_agent_version_amd64}" /data/amd64/mongodb_agent.tgz + +ARG mongodb_agent_version_arm64 +ADD "${mongodb_agent_url}/${mongodb_agent_version_arm64}" /data/arm64/mongodb_agent.tgz + +ARG mongodb_agent_version_s390x +ADD "${mongodb_agent_url}/${mongodb_agent_version_s390x}" /data/s390x/mongodb_agent.tgz + +ARG mongodb_agent_version_ppc64le +ADD "${mongodb_agent_url}/${mongodb_agent_version_ppc64le}" /data/ppc64le/mongodb_agent.tgz + + FROM ${init_database_image} AS init_database -FROM public.ecr.aws/docker/library/golang:1.24 AS dependency_downloader +FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS dependency_downloader WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes/ @@ -11,26 +45,23 @@ COPY go.mod go.sum ./ RUN go mod download -FROM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder +FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes/ +ARG TARGETOS +ARG TARGETARCH COPY --from=dependency_downloader /go/pkg /go/pkg COPY . 
/go/src/github.com/mongodb/mongodb-kubernetes -RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go -RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go +RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go +RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go FROM scratch AS base -ARG mongodb_tools_url_ubi -ARG mongodb_agent_url_ubi COPY --from=readiness_builder /readinessprobe /data/ COPY --from=readiness_builder /version-upgrade-hook /data/ -ADD ${mongodb_tools_url_ubi} /data/mongodb_tools_ubi.tgz -ADD ${mongodb_agent_url_ubi} /data/mongodb_agent_ubi.tgz - COPY --from=init_database /probes/probe.sh /data/probe.sh COPY --from=init_database /scripts/agent-launcher-lib.sh /data/ COPY --from=init_database /scripts/agent-launcher.sh /data/ @@ -76,8 +107,9 @@ RUN microdnf install -y --disableplugin=subscription-manager \ && rm -rf /var/lib/apt/lists/* -COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz -COPY --from=base /data/mongodb_agent_ubi.tgz /agent/mongodb_agent.tgz +ARG TARGETARCH +COPY --from=tools_downloader "/data/${TARGETARCH}/mongodb_tools.tgz" /tools/mongodb_tools.tgz +COPY --from=agent_downloader "/data/${TARGETARCH}/mongodb_agent.tgz" /agent/mongodb_agent.tgz RUN tar xfz /tools/mongodb_tools.tgz RUN mv mongodb-database-tools-*/bin/* /tools diff --git a/docker/mongodb-agent/README.md b/docker/mongodb-agent/README.md index a447d60f0..7cbb7d5ae 100644 --- a/docker/mongodb-agent/README.md +++ b/docker/mongodb-agent/README.md @@ -8,13 +8,22 @@ binaries from there. Then we continue with the other steps to fully build the im For building the MongoDB Agent image locally use the example command: ```bash -VERSION="108.0.7.8810-1" -INIT_DATABASE_IMAGE="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-database:1.1.0" -MONGODB_TOOLS_URL_UBI="https://downloads.mongodb.org/tools/db/mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" -MONGODB_AGENT_URL_UBI="https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-108.0.7.8810-1.rhel9_x86_64.tar.gz" -docker buildx build --load --progress plain . -f docker/mongodb-agent/Dockerfile -t "mongodb-agent:${VERSION}_1.1.0" \ - --build-arg version="${VERSION}" \ - --build-arg init_database_image="${INIT_DATABASE_IMAGE}" \ - --build-arg mongodb_tools_url_ubi="${MONGODB_TOOLS_URL_UBI}" \ - --build-arg mongodb_agent_url_ubi="${MONGODB_AGENT_URL_UBI}" +AGENT_VERSION="108.0.7.8810-1" +INIT_DATABASE_IMAGE="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/mongodb-kubernetes-init-database:evergreen" +MONGODB_TOOLS_URL="https://downloads.mongodb.org/tools/db" +MONGODB_AGENT_URL="https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" +docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . 
-f docker/mongodb-agent/Dockerfile -t "mongodb-agent:${AGENT_VERSION}" \
+    --build-arg version="${AGENT_VERSION}" \
+    --build-arg init_database_image="${INIT_DATABASE_IMAGE}" \
+    --build-arg mongodb_tools_url="${MONGODB_TOOLS_URL}" \
+    --build-arg mongodb_agent_url="${MONGODB_AGENT_URL}" \
+    --build-arg mongodb_agent_version_s390x="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel7_s390x.tar.gz" \
+    --build-arg mongodb_agent_version_ppc64le="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel8_ppc64le.tar.gz" \
+    --build-arg mongodb_agent_version_amd64="mongodb-mms-automation-agent-${AGENT_VERSION}.linux_x86_64.tar.gz" \
+    --build-arg mongodb_agent_version_arm64="mongodb-mms-automation-agent-${AGENT_VERSION}.amzn2_aarch64.tar.gz" \
+    --build-arg mongodb_tools_version_arm64="mongodb-database-tools-rhel93-aarch64-100.12.0.tgz" \
+    --build-arg mongodb_tools_version_amd64="mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" \
+    --build-arg mongodb_tools_version_s390x="mongodb-database-tools-rhel9-s390x-100.12.0.tgz" \
+    --build-arg mongodb_tools_version_ppc64le="mongodb-database-tools-rhel9-ppc64le-100.12.0.tgz"
+
 ```
diff --git a/docker/mongodb-kubernetes-database/Dockerfile b/docker/mongodb-kubernetes-database/Dockerfile
index 97fbda8d0..43f705fdd 100644
--- a/docker/mongodb-kubernetes-database/Dockerfile
+++ b/docker/mongodb-kubernetes-database/Dockerfile
@@ -41,7 +41,8 @@ RUN microdnf install -y --disableplugin=subscription-manager \
     jq \
     tar \
     xz-libs \
-    findutils
+    findutils \
+    cpio
 
 RUN ln -s /usr/lib64/libsasl2.so.3 /usr/lib64/libsasl2.so.2
 
diff --git a/docker/mongodb-kubernetes-database/README.md b/docker/mongodb-kubernetes-database/README.md
index e7b937e0e..ae3e3fde4 100644
--- a/docker/mongodb-kubernetes-database/README.md
+++ b/docker/mongodb-kubernetes-database/README.md
@@ -39,7 +39,10 @@ this images with.
 For building the MongoDB Database image locally use the example command:
 
 ```bash
-VERSION="1.0.1"
-docker buildx build --load --progress plain . -f docker/mongodb-kubernetes-database/Dockerfile -t "mongodb-kubernetes-database:${VERSION}" \
+VERSION="1.3.0"
+BASE_REPO_URL=""
+docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . -f docker/mongodb-kubernetes-database/Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes-database:${VERSION}" \
     --build-arg VERSION="${VERSION}"
+
+docker push "${BASE_REPO_URL}mongodb-kubernetes-database:${VERSION}"
 ```
diff --git a/docker/mongodb-kubernetes-init-appdb/Dockerfile b/docker/mongodb-kubernetes-init-appdb/Dockerfile
index ed0cea9dd..704361276 100644
--- a/docker/mongodb-kubernetes-init-appdb/Dockerfile
+++ b/docker/mongodb-kubernetes-init-appdb/Dockerfile
@@ -1,19 +1,18 @@
-FROM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder
+FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder
+
+ARG TARGETOS
+ARG TARGETARCH
 
 COPY . 
/go/src/github.com/mongodb/mongodb-kubernetes WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes -RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go -RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go +RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go +RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go FROM scratch AS base -ARG mongodb_tools_url_ubi - COPY --from=readiness_builder /readinessprobe /data/ COPY --from=readiness_builder /version-upgrade-hook /data/version-upgrade-hook -ADD ${mongodb_tools_url_ubi} /data/mongodb_tools_ubi.tgz - COPY ./docker/mongodb-kubernetes-init-database/content/probe.sh /data/probe.sh COPY ./docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh /data/scripts/ @@ -23,6 +22,7 @@ COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/licenses/ FROM registry.access.redhat.com/ubi8/ubi-minimal +ARG TARGETPLATFORM ARG version LABEL name="MongoDB Kubernetes Init AppDB" \ version="mongodb-kubernetes-init-appdb-${version}" \ @@ -42,7 +42,20 @@ RUN microdnf -y update --nodocs \ && microdnf -y install --nodocs tar gzip \ && microdnf clean all -COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz +ARG mongodb_tools_url +ARG mongodb_tools_version_s390x +ARG mongodb_tools_version_ppc64le +ARG mongodb_tools_version_amd64 +ARG mongodb_tools_version_arm64 + +RUN case ${TARGETPLATFORM} in \ + "linux/amd64") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_amd64} ;; \ + "linux/arm64") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_arm64} ;; \ + "linux/s390x") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_s390x} ;; \ + "linux/ppc64le") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_ppc64le} ;; \ + esac \ + && mkdir -p /tools \ + && curl -o /tools/mongodb_tools.tgz "${mongodb_tools_url}/${MONGODB_TOOLS_VERSION}" RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ && rm /tools/mongodb_tools.tgz diff --git a/docker/mongodb-kubernetes-init-appdb/README.md b/docker/mongodb-kubernetes-init-appdb/README.md index d49ca4b3a..72e31c3dd 100644 --- a/docker/mongodb-kubernetes-init-appdb/README.md +++ b/docker/mongodb-kubernetes-init-appdb/README.md @@ -3,9 +3,14 @@ For building the MongoDB Init AppDB image locally use the example command: ```bash -VERSION="1.0.1" -MONGODB_TOOLS_URL_UBI="https://downloads.mongodb.org/tools/db/mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" -docker buildx build --load --progress plain . -f docker/mongodb-kubernetes-init-appdb/Dockerfile -t "mongodb-kubernetes-init-appdb:${VERSION}" \ +VERSION="1.3.0" +MONGODB_TOOLS_URL="https://downloads.mongodb.org/tools/db" +BASE_REPO_URL="" +docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . 
-f docker/mongodb-kubernetes-init-appdb/Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes-init-appdb:${VERSION}" \
     --build-arg version="${VERSION}" \
-    --build-arg mongodb_tools_url_ubi="${MONGODB_TOOLS_URL_UBI}"
+    --build-arg mongodb_tools_url="${MONGODB_TOOLS_URL}" \
+    --build-arg mongodb_tools_version_arm64="mongodb-database-tools-rhel93-aarch64-100.12.0.tgz" \
+    --build-arg mongodb_tools_version_amd64="mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" \
+    --build-arg mongodb_tools_version_s390x="mongodb-database-tools-rhel9-s390x-100.12.0.tgz" \
+    --build-arg mongodb_tools_version_ppc64le="mongodb-database-tools-rhel9-ppc64le-100.12.0.tgz"
 ```
diff --git a/docker/mongodb-kubernetes-init-database/Dockerfile b/docker/mongodb-kubernetes-init-database/Dockerfile
index 6c861fb6a..9319b8fce 100644
--- a/docker/mongodb-kubernetes-init-database/Dockerfile
+++ b/docker/mongodb-kubernetes-init-database/Dockerfile
@@ -1,19 +1,18 @@
-FROM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder
+FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder
+
+ARG TARGETOS
+ARG TARGETARCH
 
 COPY . /go/src/github.com/mongodb/mongodb-kubernetes
 WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes
-RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go
-RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go
+RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go
+RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go
 
 FROM scratch AS base
 
-ARG mongodb_tools_url_ubi
-
 COPY --from=readiness_builder /readinessprobe /data/
 COPY --from=readiness_builder /version-upgrade-hook /data/version-upgrade-hook
 
-ADD ${mongodb_tools_url_ubi} /data/mongodb_tools_ubi.tgz
-
 COPY ./docker/mongodb-kubernetes-init-database/content/probe.sh /data/probe.sh
 
 COPY ./docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh /data/scripts/
@@ -21,8 +20,10 @@ COPY ./docker/mongodb-kubernetes-init-database/content/agent-launcher.sh /data/s
 
 COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/licenses/
 
+#TODO ubi9? 
FROM registry.access.redhat.com/ubi8/ubi-minimal +ARG TARGETPLATFORM ARG version LABEL name="MongoDB Kubernetes Init Database" \ version="mongodb-kubernetes-init-database-${version}" \ @@ -41,10 +42,24 @@ RUN microdnf -y update --nodocs \ && microdnf -y install --nodocs tar gzip \ && microdnf clean all -COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz +ARG mongodb_tools_url +ARG mongodb_tools_version_s390x +ARG mongodb_tools_version_ppc64le +ARG mongodb_tools_version_amd64 +ARG mongodb_tools_version_arm64 + +RUN case ${TARGETPLATFORM} in \ + "linux/amd64") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_amd64} ;; \ + "linux/arm64") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_arm64} ;; \ + "linux/s390x") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_s390x} ;; \ + "linux/ppc64le") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_ppc64le} ;; \ + esac \ + && mkdir -p /tools \ + && curl -o /tools/mongodb_tools.tgz "${mongodb_tools_url}/${MONGODB_TOOLS_VERSION}" RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ && rm /tools/mongodb_tools.tgz USER 2000 + ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] diff --git a/docker/mongodb-kubernetes-init-database/README.md b/docker/mongodb-kubernetes-init-database/README.md index 0e6657531..8b0b16787 100644 --- a/docker/mongodb-kubernetes-init-database/README.md +++ b/docker/mongodb-kubernetes-init-database/README.md @@ -3,9 +3,16 @@ For building the MongoDB Init AppDB image locally use the example command: ```bash -VERSION="1.0.1" -MONGODB_TOOLS_URL_UBI="https://downloads.mongodb.org/tools/db/mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" -docker buildx build --load --progress plain . -f docker/mongodb-kubernetes-init-database/Dockerfile -t "mongodb-kubernetes-init-database:${VERSION}" \ +VERSION="1.3.0" +MONGODB_TOOLS_URL_UBI="https://downloads.mongodb.org/tools/db" +BASE_REPO_URL="" +docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . 
-f docker/mongodb-kubernetes-init-database/Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes-init-database:${VERSION}" \
     --build-arg version="${VERSION}" \
-    --build-arg mongodb_tools_url_ubi="${MONGODB_TOOLS_URL_UBI}"
+    --build-arg mongodb_tools_url="${MONGODB_TOOLS_URL_UBI}" \
+    --build-arg mongodb_tools_version_arm64="mongodb-database-tools-rhel93-aarch64-100.12.0.tgz" \
+    --build-arg mongodb_tools_version_amd64="mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" \
+    --build-arg mongodb_tools_version_s390x="mongodb-database-tools-rhel9-s390x-100.12.0.tgz" \
+    --build-arg mongodb_tools_version_ppc64le="mongodb-database-tools-rhel9-ppc64le-100.12.0.tgz"
+
+docker push "${BASE_REPO_URL}mongodb-kubernetes-init-database:${VERSION}"
 ```
diff --git a/docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh b/docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh
index eaed81cf0..01060832b 100755
--- a/docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh
+++ b/docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh
@@ -91,10 +91,21 @@ download_agent() {
         AGENT_VERSION="${MDB_AGENT_VERSION}"
     fi
 
+    if [ "$(arch)" = "x86_64" ]; then
+        AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.linux_x86_64.tar.gz"
+    elif [ "$(arch)" = "aarch64" ]; then
+        AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.amzn2_aarch64.tar.gz"
+    elif [ "$(arch)" = "ppc64le" ]; then
+        AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel8_ppc64le.tar.gz"
+    elif [ "$(arch)" = "s390x" ]; then
+        AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel7_s390x.tar.gz"
+    fi
+
     script_log "Downloading Agent version: ${AGENT_VERSION}"
     script_log "Downloading a Mongodb Agent from ${base_url:?}"
     curl_opts=(
-        "${base_url}/download/agent/automation/mongodb-mms-automation-agent-${AGENT_VERSION}.linux_x86_64.tar.gz"
+        "${base_url}/download/agent/automation/${AGENT_FILE}"
+
         "--location" "--silent" "--retry" "3" "--fail" "-v"
         "--output" "automation-agent.tar.gz"
     );
@@ -117,13 +128,15 @@ download_agent() {
     rm "${MMS_LOG_DIR}/curl.log" 2>/dev/null || true
 
     script_log "The Mongodb Agent binary downloaded, unpacking"
+
+    mkdir -p "${MMS_HOME}/files"
     tar -xzf automation-agent.tar.gz
     AGENT_VERSION=$(find . 
-name "mongodb-mms-automation-agent-*" | awk -F"-" '{ print $5 }') - mkdir -p "${MMS_HOME}/files" - echo "${AGENT_VERSION}" >"${MMS_HOME}/files/agent-version" mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent "${MMS_HOME}/files/" + rm -rf automation-agent.tar.gz mongodb-mms-automation-agent-*.* + + echo "${AGENT_VERSION}" >"${MMS_HOME}/files/agent-version" chmod +x "${MMS_HOME}/files/mongodb-mms-automation-agent" - rm -rf automation-agent.tar.gz mongodb-mms-automation-agent-*.linux_x86_64 script_log "The Automation Agent was deployed at ${MMS_HOME}/files/mongodb-mms-automation-agent" popd >/dev/null || true diff --git a/docker/mongodb-kubernetes-operator/Dockerfile b/docker/mongodb-kubernetes-operator/Dockerfile index dcd3af35c..22eb0c70a 100644 --- a/docker/mongodb-kubernetes-operator/Dockerfile +++ b/docker/mongodb-kubernetes-operator/Dockerfile @@ -1,9 +1,12 @@ -FROM public.ecr.aws/docker/library/golang:1.24 AS builder +FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS builder ARG version ARG log_automation_config_diff ARG use_race +ARG TARGETOS +ARG TARGETARCH + COPY go.sum go.mod /go/src/github.com/mongodb/mongodb-kubernetes/ WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes @@ -16,14 +19,14 @@ RUN git version RUN mkdir /build && \ if [ $use_race = "true" ]; then \ echo "Building with race detector" && \ - CGO_ENABLED=1 go build -o /build/mongodb-kubernetes-operator \ + CGO_ENABLED=1 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /build/mongodb-kubernetes-operator \ -buildvcs=false \ -race \ -ldflags=" -X github.com/mongodb/mongodb-kubernetes/pkg/util.OperatorVersion=${version} \ -X github.com/mongodb/mongodb-kubernetes/pkg/util.LogAutomationConfigDiff=${log_automation_config_diff}"; \ else \ echo "Building without race detector" && \ - CGO_ENABLED=0 go build -o /build/mongodb-kubernetes-operator \ + CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /build/mongodb-kubernetes-operator \ -buildvcs=false \ -ldflags="-s -w -X github.com/mongodb/mongodb-kubernetes/pkg/util.OperatorVersion=${version} \ -X github.com/mongodb/mongodb-kubernetes/pkg/util.LogAutomationConfigDiff=${log_automation_config_diff}"; \ diff --git a/docker/mongodb-kubernetes-operator/README.md b/docker/mongodb-kubernetes-operator/README.md index 8335c1d79..adb532345 100644 --- a/docker/mongodb-kubernetes-operator/README.md +++ b/docker/mongodb-kubernetes-operator/README.md @@ -13,13 +13,16 @@ CGO_ENABLED=0 GOOS=linux GOFLAGS="-mod=vendor" go build -i -o mongodb-kubernetes For building the MongoDB Init Ops Manager image locally use the example command: ```bash -VERSION="1.1.0" +VERSION="1.3.0" LOG_AUTOMATION_CONFIG_DIFF="false" USE_RACE="false" -docker buildx build --load --progress plain . -f docker/mongodb-kubernetes-operator/Dockerfile -t "mongodb-kubernetes-operator:${VERSION}" \ +BASE_REPO_URL="" +docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . 
-f docker/mongodb-kubernetes-operator/Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes:${VERSION}" \ --build-arg version="${VERSION}" \ --build-arg log_automation_config_diff="${LOG_AUTOMATION_CONFIG_DIFF}" \ --build-arg use_race="${USE_RACE}" + +docker push "${BASE_REPO_URL}mongodb-kubernetes:${VERSION}" ``` ### Running locally diff --git a/docker/mongodb-kubernetes-readinessprobe/Dockerfile b/docker/mongodb-kubernetes-readinessprobe/Dockerfile index a2f3159b4..d3fcb0a8e 100644 --- a/docker/mongodb-kubernetes-readinessprobe/Dockerfile +++ b/docker/mongodb-kubernetes-readinessprobe/Dockerfile @@ -1,10 +1,11 @@ -FROM public.ecr.aws/docker/library/golang:1.24 AS builder +FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS builder WORKDIR /go/src ADD . . +ARG TARGETOS ARG TARGETARCH -RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} go build -a -o /data/scripts/readinessprobe ./mongodb-community-operator/cmd/readiness/main.go +RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -a -o /data/scripts/readinessprobe ./mongodb-community-operator/cmd/readiness/main.go FROM registry.access.redhat.com/ubi9/ubi-minimal diff --git a/docker/mongodb-kubernetes-upgrade-hook/Dockerfile b/docker/mongodb-kubernetes-upgrade-hook/Dockerfile index 5005f5801..90455d85d 100644 --- a/docker/mongodb-kubernetes-upgrade-hook/Dockerfile +++ b/docker/mongodb-kubernetes-upgrade-hook/Dockerfile @@ -1,10 +1,11 @@ -FROM public.ecr.aws/docker/library/golang:1.24 AS builder +FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS builder WORKDIR /go/src ADD . . ARG TARGETARCH -RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} go build -a -o /data/scripts/version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go +ARG TARGETOS +RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -a -o /data/scripts/version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go FROM registry.access.redhat.com/ubi9/ubi-minimal From a2479b8f6363c51cc0bb1dc3a1367d34f3d782e0 Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Tue, 29 Jul 2025 15:33:05 +0200 Subject: [PATCH 002/164] Optimize init database --- .../Dockerfile | 72 ++++++++++--------- 1 file changed, 40 insertions(+), 32 deletions(-) diff --git a/docker/mongodb-kubernetes-init-database/Dockerfile b/docker/mongodb-kubernetes-init-database/Dockerfile index 9319b8fce..3f38f7870 100644 --- a/docker/mongodb-kubernetes-init-database/Dockerfile +++ b/docker/mongodb-kubernetes-init-database/Dockerfile @@ -1,16 +1,37 @@ +FROM scratch AS tools_downloader + +ARG mongodb_tools_url + +ARG mongodb_tools_version_amd64 +ADD "${mongodb_tools_url}/${mongodb_tools_version_amd64}" /data/amd64/mongodb_tools.tgz + +ARG mongodb_tools_version_arm64 +ADD "${mongodb_tools_url}/${mongodb_tools_version_arm64}" /data/arm64/mongodb_tools.tgz + +ARG mongodb_tools_version_s390x +ADD "${mongodb_tools_url}/${mongodb_tools_version_s390x}" /data/s390x/mongodb_tools.tgz + +ARG mongodb_tools_version_ppc64le +ADD "${mongodb_tools_url}/${mongodb_tools_version_ppc64le}" /data/ppc64le/mongodb_tools.tgz + FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder +WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes/ + +COPY go.mod go.sum ./ + +RUN go mod download + +COPY mongodb-community-operator /go/src/github.com/mongodb/mongodb-kubernetes/mongodb-community-operator + ARG TARGETOS ARG TARGETARCH - -COPY . 
/go/src/github.com/mongodb/mongodb-kubernetes -WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go FROM scratch AS base -COPY --from=readiness_builder /readinessprobe /data/ +COPY --from=readiness_builder /readinessprobe /data/readinessprobe COPY --from=readiness_builder /version-upgrade-hook /data/version-upgrade-hook COPY ./docker/mongodb-kubernetes-init-database/content/probe.sh /data/probe.sh @@ -23,7 +44,21 @@ COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/licenses/ #TODO ubi9? FROM registry.access.redhat.com/ubi8/ubi-minimal -ARG TARGETPLATFORM +ARG TARGETARCH +COPY --from=tools_downloader /data/${TARGETARCH}/mongodb_tools.tgz /tools/mongodb_tools.tgz + +RUN microdnf -y update --nodocs \ + && microdnf -y install --nodocs tar gzip \ + && microdnf clean all + +RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ + && rm /tools/mongodb_tools.tgz + +COPY --from=base /data/readinessprobe /probes/readinessprobe +COPY --from=base /data/probe.sh /probes/probe.sh +COPY --from=base /data/scripts/ /scripts/ +COPY --from=base /data/licenses /licenses/ + ARG version LABEL name="MongoDB Kubernetes Init Database" \ version="mongodb-kubernetes-init-database-${version}" \ @@ -33,33 +68,6 @@ LABEL name="MongoDB Kubernetes Init Database" \ vendor="MongoDB" \ maintainer="support@mongodb.com" -COPY --from=base /data/readinessprobe /probes/readinessprobe -COPY --from=base /data/probe.sh /probes/probe.sh -COPY --from=base /data/scripts/ /scripts/ -COPY --from=base /data/licenses /licenses/ - -RUN microdnf -y update --nodocs \ - && microdnf -y install --nodocs tar gzip \ - && microdnf clean all - -ARG mongodb_tools_url -ARG mongodb_tools_version_s390x -ARG mongodb_tools_version_ppc64le -ARG mongodb_tools_version_amd64 -ARG mongodb_tools_version_arm64 - -RUN case ${TARGETPLATFORM} in \ - "linux/amd64") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_amd64} ;; \ - "linux/arm64") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_arm64} ;; \ - "linux/s390x") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_s390x} ;; \ - "linux/ppc64le") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_ppc64le} ;; \ - esac \ - && mkdir -p /tools \ - && curl -o /tools/mongodb_tools.tgz "${mongodb_tools_url}/${MONGODB_TOOLS_VERSION}" - -RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ - && rm /tools/mongodb_tools.tgz - USER 2000 ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] From abe4a2c805afc764009783014c034c8c41612a5f Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Tue, 29 Jul 2025 15:42:33 +0200 Subject: [PATCH 003/164] Optimize operator --- docker/mongodb-kubernetes-operator/Dockerfile | 40 +++++++++---------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/docker/mongodb-kubernetes-operator/Dockerfile b/docker/mongodb-kubernetes-operator/Dockerfile index 22eb0c70a..1b2fb371d 100644 --- a/docker/mongodb-kubernetes-operator/Dockerfile +++ b/docker/mongodb-kubernetes-operator/Dockerfile @@ -1,11 +1,7 @@ FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS builder -ARG version -ARG 
log_automation_config_diff -ARG use_race - -ARG TARGETOS -ARG TARGETARCH +ADD https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 /usr/local/bin/jq +RUN chmod +x /usr/local/bin/jq COPY go.sum go.mod /go/src/github.com/mongodb/mongodb-kubernetes/ @@ -14,8 +10,12 @@ RUN go mod download COPY . /go/src/github.com/mongodb/mongodb-kubernetes -RUN go version -RUN git version +ARG version +ARG log_automation_config_diff +ARG use_race +ARG TARGETOS +ARG TARGETARCH + RUN mkdir /build && \ if [ $use_race = "true" ]; then \ echo "Building with race detector" && \ @@ -32,10 +32,6 @@ RUN mkdir /build && \ -X github.com/mongodb/mongodb-kubernetes/pkg/util.LogAutomationConfigDiff=${log_automation_config_diff}"; \ fi - -ADD https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 /usr/local/bin/jq -RUN chmod +x /usr/local/bin/jq - RUN mkdir -p /data RUN cat release.json | jq -r '.supportedImages."mongodb-agent" | { "supportedImages": { "mongodb-agent": . } }' > /data/om_version_mapping.json RUN chmod +r /data/om_version_mapping.json @@ -49,16 +45,6 @@ ADD docker/mongodb-kubernetes-operator/licenses /data/licenses/ FROM registry.access.redhat.com/ubi9/ubi-minimal -ARG version - -LABEL name="MongoDB Kubernetes Operator" \ - maintainer="support@mongodb.com" \ - vendor="MongoDB" \ - version="${version}" \ - release="1" \ - summary="MongoDB Kubernetes Operator Image" \ - description="MongoDB Kubernetes Operator Image" - # Building an UBI-based image: https://red.ht/3n6b9y0 RUN microdnf update \ --disableplugin=subscription-manager \ @@ -70,6 +56,16 @@ COPY --from=base /data/mongodb-kubernetes-operator /usr/local/bin/mongodb-kubern COPY --from=base /data/om_version_mapping.json /usr/local/om_version_mapping.json COPY --from=base /data/licenses /licenses/ +ARG version + +LABEL name="MongoDB Kubernetes Operator" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="${version}" \ + release="1" \ + summary="MongoDB Kubernetes Operator Image" \ + description="MongoDB Kubernetes Operator Image" + USER 2000 ENTRYPOINT exec /usr/local/bin/mongodb-kubernetes-operator From fd701b22266a511ca5fe45b06e95b095ace1bef5 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Tue, 29 Jul 2025 15:58:56 +0200 Subject: [PATCH 004/164] add minikube scripts --- .evergreen-functions.yml | 18 ++++++++++++++++++ .evergreen.yml | 27 +++++++++++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/.evergreen-functions.yml b/.evergreen-functions.yml index c004dd098..3cd67d524 100644 --- a/.evergreen-functions.yml +++ b/.evergreen-functions.yml @@ -236,6 +236,15 @@ functions: - ${workdir}/bin binary: scripts/dev/setup_evg_host.sh + setup_ibm_host: &setup_ibm_host + command: subprocess.exec + type: setup + params: + working_dir: src/github.com/mongodb/mongodb-kubernetes + add_to_path: + - ${workdir}/bin + binary: scripts/dev/setup_minikube_host.sh + lint_repo: - command: subprocess.exec type: setup @@ -261,6 +270,15 @@ functions: - *setup_evg_host - *python_venv + # This differs for normal evg_host as we require minikube instead of kind for + # IBM machines + setup_building_host_minikube: + - *switch_context + - *setup_aws + - *configure_docker_auth + - *setup_ibm_host + - *python_venv + prune_docker_resources: - command: subprocess.exec type: setup diff --git a/.evergreen.yml b/.evergreen.yml index 209bf152a..17e882f95 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -77,6 +77,13 @@ variables: - func: download_kube_tools - func: setup_building_host + - &setup_group_ibm + 
setup_group_can_fail_task: true + setup_group: + - func: clone + - func: download_kube_tools + - func: setup_building_host_minikube + - &setup_group_multi_cluster setup_group_can_fail_task: true setup_group: @@ -1188,6 +1195,14 @@ task_groups: - e2e_om_ops_manager_backup <<: *teardown_group + - name: e2e_smoke_ibm_task_group + max_hosts: -1 + <<: *setup_group_ibm + <<: *setup_and_teardown_task + tasks: + - e2e_om_ops_manager_backup + <<: *teardown_group + - name: e2e_ops_manager_kind_5_0_only_task_group max_hosts: -1 <<: *setup_group @@ -1453,6 +1468,18 @@ buildvariants: tasks: - name: e2e_smoke_task_group + - name: e2e_smoke_ibm + display_name: e2e_smoke_ibm + tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] + run_on: + - rhel9-power-large + allowed_requesters: [ "patch", "github_tag" ] +# depends_on: +# - name: build_test_image +# variant: init_test_run + tasks: + - name: e2e_smoke_ibm_task_group + - name: e2e_static_smoke display_name: e2e_static_smoke tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] From 05cbe235bcd8af5aa2547631b891e0e0e783717f Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Tue, 29 Jul 2025 16:01:39 +0200 Subject: [PATCH 005/164] smoke test replicacset --- .evergreen.yml | 2 +- scripts/dev/contexts/e2e_smoke_ibm | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) create mode 100644 scripts/dev/contexts/e2e_smoke_ibm diff --git a/.evergreen.yml b/.evergreen.yml index 17e882f95..2d2e113ae 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -1200,7 +1200,7 @@ task_groups: <<: *setup_group_ibm <<: *setup_and_teardown_task tasks: - - e2e_om_ops_manager_backup + - e2e_replica_set <<: *teardown_group - name: e2e_ops_manager_kind_5_0_only_task_group diff --git a/scripts/dev/contexts/e2e_smoke_ibm b/scripts/dev/contexts/e2e_smoke_ibm new file mode 100644 index 000000000..03384c26c --- /dev/null +++ b/scripts/dev/contexts/e2e_smoke_ibm @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -Eeou pipefail + +script_name=$(readlink -f "${BASH_SOURCE[0]}") +script_dir=$(dirname "${script_name}") + +source "${script_dir}/root-context" + +export ops_manager_version="cloud_qa" + +# This is required to be able to rebuild the om image and use that image which has been rebuild +export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev +CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${script_dir}"/../../../.evergreen.yml | awk '{print $3}') +export CUSTOM_OM_VERSION + +export CUSTOM_MDB_VERSION=6.0.5 +export CUSTOM_MDB_PREV_VERSION=5.0.7 From a1f3d90ed86606d7428f2499cfa3f140a19120c7 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Tue, 29 Jul 2025 16:26:38 +0200 Subject: [PATCH 006/164] smoke test replicacset --- scripts/evergreen/setup_kubectl.sh | 40 ++++++++++++++++++++++++++---- 1 file changed, 35 insertions(+), 5 deletions(-) diff --git a/scripts/evergreen/setup_kubectl.sh b/scripts/evergreen/setup_kubectl.sh index ab9066ac1..00cf975fd 100755 --- a/scripts/evergreen/setup_kubectl.sh +++ b/scripts/evergreen/setup_kubectl.sh @@ -3,22 +3,52 @@ set -Eeou pipefail source scripts/dev/set_env_context.sh +# Detect system architecture and map to kubectl/helm architecture names +detect_architecture() { + local arch + arch=$(uname -m) + + case "${arch}" in + x86_64) + echo "amd64" + ;; + aarch64|arm64) + echo "arm64" + ;; + ppc64le) + echo "ppc64le" + ;; + s390x) + echo "s390x" + ;; + *) + echo "Unsupported architecture: ${arch}" >&2 + echo "Supported architectures: x86_64 (amd64), aarch64 (arm64), ppc64le, s390x" >&2 
+ exit 1 + ;; + esac +} + +# Detect the current architecture +ARCH=$(detect_architecture) +echo "Detected architecture: ${ARCH}" + bindir="${PROJECT_DIR}/bin" tmpdir="${PROJECT_DIR}/tmp" mkdir -p "${bindir}" "${tmpdir}" -echo "Downloading latest kubectl" -curl -s --retry 3 -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" +echo "Downloading latest kubectl for ${ARCH}" +curl -s --retry 3 -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/${ARCH}/kubectl" chmod +x kubectl echo "kubectl version --client" ./kubectl version --client mv kubectl "${bindir}" -echo "Downloading helm" +echo "Downloading helm for ${ARCH}" helm_archive="${tmpdir}/helm.tgz" helm_version="v3.17.1" -curl -s https://get.helm.sh/helm-${helm_version}-linux-amd64.tar.gz --output "${helm_archive}" +curl -s https://get.helm.sh/helm-${helm_version}-linux-${ARCH}.tar.gz --output "${helm_archive}" tar xfz "${helm_archive}" -C "${tmpdir}" &> /dev/null -mv "${tmpdir}/linux-amd64/helm" "${bindir}" +mv "${tmpdir}/linux-${ARCH}/helm" "${bindir}" "${bindir}"/helm version From 2d335f398673fff6ad51eeaeac3a1d0b29199292 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Tue, 29 Jul 2025 16:48:48 +0200 Subject: [PATCH 007/164] add minikube and make aws multiarch --- scripts/evergreen/setup_aws.sh | 37 +++- scripts/minikube/install-docker.sh | 108 +++++++++++ scripts/minikube/install-minikube.sh | 190 ++++++++++++++++++++ scripts/minikube/minikube_host.sh | 226 ++++++++++++++++++++++++ scripts/minikube/setup_minikube_host.sh | 115 ++++++++++++ 5 files changed, 675 insertions(+), 1 deletion(-) create mode 100755 scripts/minikube/install-docker.sh create mode 100755 scripts/minikube/install-minikube.sh create mode 100755 scripts/minikube/minikube_host.sh create mode 100755 scripts/minikube/setup_minikube_host.sh diff --git a/scripts/evergreen/setup_aws.sh b/scripts/evergreen/setup_aws.sh index 931eb0a36..072900639 100755 --- a/scripts/evergreen/setup_aws.sh +++ b/scripts/evergreen/setup_aws.sh @@ -3,6 +3,40 @@ set -Eeou pipefail source scripts/dev/set_env_context.sh +# Detect system architecture and map to AWS CLI architecture names +detect_aws_architecture() { + local arch + arch=$(uname -m) + + case "${arch}" in + x86_64) + echo "x86_64" + ;; + aarch64|arm64) + echo "aarch64" + ;; + ppc64le) + echo "Skipping AWS CLI installation: ppc64le (IBM Power) architecture is not supported by AWS CLI v2." >&2 + echo "AWS CLI v2 only supports: x86_64 (amd64), aarch64 (arm64)" >&2 + exit 0 + ;; + s390x) + echo "Skipping AWS CLI installation: s390x (IBM Z) architecture is not supported by AWS CLI v2." >&2 + echo "AWS CLI v2 only supports: x86_64 (amd64), aarch64 (arm64)" >&2 + exit 0 + ;; + *) + echo "Skipping AWS CLI installation: Unsupported architecture: ${arch}" >&2 + echo "AWS CLI v2 only supports: x86_64 (amd64), aarch64 (arm64)" >&2 + exit 0 + ;; + esac +} + +# Detect the current architecture +ARCH=$(detect_aws_architecture) +echo "Detected architecture: ${ARCH} (AWS CLI v2 supported)" + INSTALL_DIR="${workdir:?}/.local/lib/aws" BIN_LOCATION="${workdir}/bin" @@ -11,7 +45,8 @@ mkdir -p "${BIN_LOCATION}" tmpdir=$(mktemp -d) cd "${tmpdir}" -curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" +echo "Downloading AWS CLI v2 for ${ARCH}..." 
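+# detect_aws_architecture runs inside a command substitution, so its `exit 0` for
+# unsupported architectures only terminates that subshell and leaves ARCH empty;
+# guard here so the install is skipped instead of fetching a malformed URL.
+if [[ -z "${ARCH:-}" ]]; then
+    echo "Skipping AWS CLI installation for $(uname -m)."
+    exit 0
+fi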
+curl "https://awscli.amazonaws.com/awscli-exe-linux-${ARCH}.zip" -o "awscliv2.zip" unzip awscliv2.zip &> /dev/null docker_dir="/home/${USER}/.docker" diff --git a/scripts/minikube/install-docker.sh b/scripts/minikube/install-docker.sh new file mode 100755 index 000000000..04ae3f7d8 --- /dev/null +++ b/scripts/minikube/install-docker.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash +set -Eeou pipefail + +# Script to install Docker on s390x architecture (specifically for RHEL/Ubuntu based systems) + +print_usage() { + echo "Usage: $0 [options]" + echo "Options:" + echo " -h, --help Show this help message" + echo " -u, --user Username to add to docker group (optional)" + echo "" + echo "This script installs Docker on s390x architecture systems." +} + +DOCKER_USER="" + +while [[ $# -gt 0 ]]; do + case $1 in + -h|--help) + print_usage + exit 0 + ;; + -u|--user) + DOCKER_USER="$2" + shift 2 + ;; + *) + echo "Unknown option: $1" + print_usage + exit 1 + ;; + esac +done + +echo "Installing Docker on s390x architecture..." + +# Detect OS +if [[ -f /etc/redhat-release ]]; then + OS_TYPE="rhel" +elif [[ -f /etc/debian_version ]]; then + OS_TYPE="debian" +else + echo "Unsupported OS. This script supports RHEL/CentOS and Ubuntu/Debian." + exit 1 +fi + +# Install Docker based on OS +if [[ "$OS_TYPE" == "rhel" ]]; then + echo "Detected RHEL/CentOS system..." + + # Remove any existing Docker packages + sudo yum remove -y docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-engine || true + + # Install required packages (some may not exist on newer RHEL versions) + sudo yum install -y yum-utils || echo "yum-utils already installed or unavailable" + sudo yum install -y device-mapper-persistent-data lvm2 || echo "device-mapper packages may not be available on this system" + + # Add Docker repository + sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo + + # Install Docker CE + sudo yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin + +elif [[ "$OS_TYPE" == "debian" ]]; then + echo "Detected Ubuntu/Debian system..." + + # Remove any existing Docker packages + sudo apt-get remove -y docker docker-engine docker.io containerd runc || true + + # Update package index + sudo apt-get update + + # Install required packages + sudo apt-get install -y apt-transport-https ca-certificates curl gnupg lsb-release + + # Add Docker's official GPG key + curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg + + # Set up stable repository + echo "deb [arch=s390x signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + + # Update package index again + sudo apt-get update + + # Install Docker CE + sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin +fi + +# Start and enable Docker service +sudo systemctl start docker +sudo systemctl enable docker + +# Add user to docker group if specified +if [[ -n "$DOCKER_USER" ]]; then + echo "Adding user '$DOCKER_USER' to docker group..." + sudo usermod -aG docker "$DOCKER_USER" + echo "Note: User '$DOCKER_USER' needs to log out and log back in for group membership to take effect." +fi + +# Verify installation +echo "Verifying Docker installation..." 
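+# hello-world publishes multi-arch manifests (including s390x), so the run below
+# also confirms the daemon can pull and execute a native-architecture image.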
+sudo docker --version +sudo docker run --rm hello-world + +echo "Docker installation completed successfully!" +echo "" +echo "If you added a user to the docker group, they need to log out and log back in." +echo "You can also run 'newgrp docker' to apply the group membership in the current session." \ No newline at end of file diff --git a/scripts/minikube/install-minikube.sh b/scripts/minikube/install-minikube.sh new file mode 100755 index 000000000..3e6dd4e13 --- /dev/null +++ b/scripts/minikube/install-minikube.sh @@ -0,0 +1,190 @@ +#!/usr/bin/env bash +set -Eeou pipefail + +# Script to install and configure minikube on s390x architecture + +print_usage() { + echo "Usage: $0 [options]" + echo "Options:" + echo " -h, --help Show this help message" + echo " -v, --version VERSION Minikube version to install (default: latest)" + echo " -k, --kubernetes VER Kubernetes version (default: latest stable)" + echo " -m, --memory MEMORY Memory allocation (default: 8192mb)" + echo " -c, --cpus CPUS CPU allocation (default: 4)" + echo " --profile PROFILE Minikube profile name (default: minikube)" + echo " --start Start minikube after installation" + echo "" + echo "This script installs minikube on s390x architecture and configures it for MongoDB Kubernetes e2e testing." +} + +MINIKUBE_VERSION="latest" +K8S_VERSION="" +MEMORY="8192" +CPUS="4" +PROFILE="minikube" +START_MINIKUBE="false" + +while [[ $# -gt 0 ]]; do + case $1 in + -h|--help) + print_usage + exit 0 + ;; + -v|--version) + MINIKUBE_VERSION="$2" + shift 2 + ;; + -k|--kubernetes) + K8S_VERSION="$2" + shift 2 + ;; + -m|--memory) + MEMORY="$2" + shift 2 + ;; + -c|--cpus) + CPUS="$2" + shift 2 + ;; + --profile) + PROFILE="$2" + shift 2 + ;; + --start) + START_MINIKUBE="true" + shift + ;; + *) + echo "Unknown option: $1" + print_usage + exit 1 + ;; + esac +done + +echo "Installing minikube on s390x architecture..." + +# Verify Docker is installed +if ! command -v docker &> /dev/null; then + echo "Error: Docker is required but not installed. Please install Docker first." + echo "You can use the install-docker.sh script in this directory." + exit 1 +fi + +# Verify Docker is running +if ! docker info &> /dev/null; then + echo "Error: Docker is not running. Please start Docker service." + exit 1 +fi + +# Install kubectl if not present +if ! command -v kubectl &> /dev/null; then + echo "Installing kubectl..." + + # Get the latest kubectl version if not specified + if [[ -z "$K8S_VERSION" ]]; then + K8S_VERSION=$(curl -L -s https://dl.k8s.io/release/stable.txt) + fi + + # Download kubectl for s390x + curl -LO "https://dl.k8s.io/release/${K8S_VERSION}/bin/linux/s390x/kubectl" + + # Verify the binary + curl -LO "https://dl.k8s.io/${K8S_VERSION}/bin/linux/s390x/kubectl.sha256" + echo "$(cat kubectl.sha256) kubectl" | sha256sum --check + + # Install kubectl + sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + rm -f kubectl kubectl.sha256 + + echo "kubectl installed successfully" +fi + +# Install minikube +echo "Installing minikube..." 
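# Resolve "latest" to a concrete release tag via the GitHub releases API so the
# s390x download URL below can be constructed deterministically.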
+ +if [[ "$MINIKUBE_VERSION" == "latest" ]]; then + # Get the latest minikube version + MINIKUBE_VERSION=$(curl -s https://api.github.com/repos/kubernetes/minikube/releases/latest | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/') +fi + +# Download minikube for s390x +curl -Lo minikube "https://github.com/kubernetes/minikube/releases/download/${MINIKUBE_VERSION}/minikube-linux-s390x" + +# Make it executable and install +chmod +x minikube +sudo install minikube /usr/local/bin/ + +# Clean up +rm -f minikube + +echo "Minikube ${MINIKUBE_VERSION} installed successfully" + +# Configure minikube for MongoDB Kubernetes testing +echo "Configuring minikube for MongoDB Kubernetes e2e testing..." + +# Set default driver to docker +minikube config set driver docker + +# Configure resource limits +minikube config set memory "${MEMORY}mb" +minikube config set cpus "${CPUS}" + +# Enable required addons for testing +ADDONS=( + "storage-provisioner" + "default-storageclass" + "volumesnapshots" + "csi-hostpath-driver" +) + +echo "Minikube configuration completed." + +if [[ "$START_MINIKUBE" == "true" ]]; then + echo "Starting minikube cluster with profile '${PROFILE}'..." + + # Start minikube with specific configuration for MongoDB testing + minikube start \ + --profile="${PROFILE}" \ + --driver=docker \ + --memory="${MEMORY}mb" \ + --cpus="${CPUS}" \ + --disk-size=50g \ + --extra-config=kubelet.authentication-token-webhook=true \ + --extra-config=kubelet.authorization-mode=Webhook \ + --extra-config=scheduler.bind-address=0.0.0.0 \ + --extra-config=controller-manager.bind-address=0.0.0.0 \ + ${K8S_VERSION:+--kubernetes-version=$K8S_VERSION} + + # Wait for cluster to be ready + echo "Waiting for cluster to be ready..." + kubectl wait --for=condition=Ready nodes --all --timeout=300s + + # Enable addons + for addon in "${ADDONS[@]}"; do + echo "Enabling addon: $addon" + minikube addons enable "$addon" --profile="${PROFILE}" || true + done + + # Create directories that MongoDB tests expect (similar to kind setup) + echo "Setting up test directories..." + minikube ssh --profile="${PROFILE}" -- 'sudo mkdir -p /opt/data/mongo-data-{0..2} /opt/data/mongo-logs-{0..2}' + minikube ssh --profile="${PROFILE}" -- 'sudo chmod 777 /opt/data/mongo-data-* /opt/data/mongo-logs-*' + + echo "Minikube cluster started successfully!" + echo "" + echo "To use this cluster:" + echo " export KUBECONFIG=\$(minikube kubeconfig --profile=${PROFILE})" + echo " kubectl get nodes" + echo "" + echo "To stop the cluster:" + echo " minikube stop --profile=${PROFILE}" +else + echo "" + echo "Minikube installed but not started." + echo "To start minikube later, run:" + echo " minikube start --profile=${PROFILE} --driver=docker --memory=${MEMORY}mb --cpus=${CPUS}" +fi + +echo "" +echo "Installation completed successfully!" \ No newline at end of file diff --git a/scripts/minikube/minikube_host.sh b/scripts/minikube/minikube_host.sh new file mode 100755 index 000000000..ab410d7bc --- /dev/null +++ b/scripts/minikube/minikube_host.sh @@ -0,0 +1,226 @@ +#!/usr/bin/env bash + +# This is a helper script for running tests on s390x Hosts. +# It allows to configure minikube clusters and expose remote API servers on a local machine to +# enable local development while running minikube cluster on s390x instance. +# Run "minikube_host.sh help" command to see the full usage. +# Similar to evg_host.sh but uses minikube instead of kind. 
+ +set -Eeou pipefail + +test "${MDB_BASH_DEBUG:-0}" -eq 1 && set -x + +source scripts/dev/set_env_context.sh +source scripts/funcs/printing + +if [[ -z "${S390_HOST_NAME}" ]]; then + echo "S390_HOST_NAME env var is missing" + echo "Set it to your s390x host connection string (e.g., user@hostname)" + exit 1 +fi + +get_host_url() { + echo "${S390_HOST_NAME}" +} + +cmd=${1-""} + +if [[ "${cmd}" != "" && "${cmd}" != "help" ]]; then + host_url=$(get_host_url) +fi + +kubeconfig_path="${HOME}/.operator-dev/s390-host.kubeconfig" + +configure() { + shift 1 + arch=${1-"$(uname -m)"} + + echo "Configuring minikube host ${S390_HOST_NAME} (${host_url}) with architecture ${arch}" + + if [[ "${cmd}" == "configure" && ! "${arch}" =~ ^(s390x|ppc64le|x86_64|aarch64)$ ]]; then + echo "'configure' command supports the following architectures: s390x, ppc64le, x86_64, aarch64" + exit 1 + fi + + ssh -T -q "${host_url}" "sudo chown \$(whoami):\$(whoami) ~/.docker || true; mkdir -p ~/.docker" + if [[ -f "${HOME}/.docker/config.json" ]]; then + echo "Copying local ~/.docker/config.json authorization credentials to s390x host" + jq '. | with_entries(select(.key == "auths"))' "${HOME}/.docker/config.json" | ssh -T -q "${host_url}" 'cat > ~/.docker/config.json' + fi + + sync + + ssh -T -q "${host_url}" "cd ~/mongodb-kubernetes; scripts/dev/switch_context.sh root-context; scripts/minikube/setup_minikube_host.sh ${arch}" +} + +sync() { + rsync --verbose --archive --compress --human-readable --recursive --progress \ + --delete --delete-excluded --max-size=1000000 --prune-empty-dirs \ + -e ssh \ + --include-from=.rsyncinclude \ + --exclude-from=.gitignore \ + --exclude-from=.rsyncignore \ + ./ "${host_url}:~/mongodb-kubernetes/" + + rsync --verbose --no-links --recursive --prune-empty-dirs --archive --compress --human-readable \ + --max-size=1000000 \ + -e ssh \ + ~/.operator-dev/ \ + "${host_url}:~/.operator-dev" & + + wait +} + +remote-prepare-local-e2e-run() { + set -x + sync + cmd make switch context=e2e_mdb_kind_ubi_cloudqa + cmd scripts/dev/prepare_local_e2e_run.sh + rsync --verbose --no-links --recursive --prune-empty-dirs --archive --compress --human-readable \ + --max-size=1000000 \ + -e ssh \ + "${host_url}:~/mongodb-kubernetes/.multi_cluster_local_test_files" \ + ./ & + scp "${host_url}:~/.operator-dev/multicluster_kubeconfig" "${KUBE_CONFIG_PATH}" & + + wait +} + +get-kubeconfig() { + # For minikube, we need to get the kubeconfig and certificates + echo "Getting kubeconfig from minikube on s390x host..." + local profile=${MINIKUBE_PROFILE:-mongodb-e2e} + + # Create local minikube directory structure + mkdir -p "${HOME}/.minikube/profiles/${profile}" + + # Copy certificates from remote host + echo "Copying minikube certificates..." 
+ scp "${host_url}:~/.minikube/ca.crt" "${HOME}/.minikube/" + scp "${host_url}:~/.minikube/profiles/${profile}/client.crt" "${HOME}/.minikube/profiles/${profile}/" + scp "${host_url}:~/.minikube/profiles/${profile}/client.key" "${HOME}/.minikube/profiles/${profile}/" + + # Get kubeconfig and update paths to local ones + ssh -T -q "${host_url}" "cd ~/mongodb-kubernetes; export KUBE_ENVIRONMENT_NAME=minikube; export MINIKUBE_PROFILE=${profile}; kubectl config view --raw" > "${kubeconfig_path}" + + # Update certificate paths to local paths + sed -i '' "s|/home/cloud-user/.minikube|${HOME}/.minikube|g" "${kubeconfig_path}" + + # Update server addresses to use localhost for tunneling + sed -i '' "s|https://192.168.[0-9]*.[0-9]*:\([0-9]*\)|https://127.0.0.1:\1|g" "${kubeconfig_path}" + + echo "Copied minikube kubeconfig and certificates to ${kubeconfig_path}" +} + +recreate-minikube-cluster() { + shift 1 + profile_name=${1:-mongodb-e2e} + configure "$(uname -m)" 2>&1| prepend "minikube_host.sh configure" + echo "Recreating minikube cluster ${profile_name} on ${S390_HOST_NAME} (${host_url})..." + # shellcheck disable=SC2088 + ssh -T "${host_url}" "cd ~/mongodb-kubernetes; export KUBE_ENVIRONMENT_NAME=minikube; export MINIKUBE_PROFILE=${profile_name}; minikube delete --profile=${profile_name} || true; minikube start --profile=${profile_name} --driver=docker --memory=8192mb --cpus=4" + echo "Copying kubeconfig to ${kubeconfig_path}" + get-kubeconfig +} + +tunnel() { + shift 1 + echo "Setting up tunnel for minikube cluster..." + local profile=${MINIKUBE_PROFILE:-mongodb-e2e} + + # Get the minikube API server port from remote host + local api_port + api_port=$(ssh -T -q "${host_url}" "cd ~/mongodb-kubernetes; export MINIKUBE_PROFILE=${profile}; minikube ip --profile=${profile} 2>/dev/null && echo ':8443' | tr -d '\n'") + + if [[ -z "${api_port}" ]]; then + echo "Could not determine minikube API server details. Is the cluster running?" 
+ return 1 + fi + + # Extract just the port (8443) + local port="8443" + echo "Forwarding localhost:${port} to minikube cluster API server" + + # Forward the API server port through minikube + set -x + # shellcheck disable=SC2029 + ssh -L "${port}:$(ssh -T -q "${host_url}" "export MINIKUBE_PROFILE=${profile}; minikube ip --profile=${profile}"):${port}" "${host_url}" "$@" + set +x +} + +retry_with_sleep() { + shift 1 + cmd=$1 + local sleep_time + sleep_time=5 + + while true; do + ${cmd} || true + echo "Retrying command after ${sleep_time} of sleep: ${cmd}" + sleep 5; + done +} + +ssh_to_host() { + shift 1 + # shellcheck disable=SC2029 + ssh "$@" "${host_url}" +} + +upload-my-ssh-private-key() { + ssh -T -q "${host_url}" "mkdir -p ~/.ssh" + scp "${HOME}/.ssh/id_rsa" "${host_url}:~/.ssh/id_rsa" + scp "${HOME}/.ssh/id_rsa.pub" "${host_url}:~/.ssh/id_rsa.pub" + ssh -T -q "${host_url}" "chmod 700 ~/.ssh && chown -R \$(whoami):\$(whoami) ~/.ssh" +} + +cmd() { + if [[ "$1" == "cmd" ]]; then + shift 1 + fi + + cmd="cd ~/mongodb-kubernetes; $*" + ssh -T -q "${host_url}" "${cmd}" +} + +usage() { + echo "USAGE: + minikube_host.sh + +PREREQUISITES: + - s390x host with SSH access + - define S390_HOST_NAME env var (e.g., export S390_HOST_NAME=user@hostname) + - SSH key-based authentication configured + +COMMANDS: + configure installs on a host: calls sync, switches context, installs necessary software (auto-detects arch) + sync rsync of project directory + recreate-minikube-cluster recreates minikube cluster with specific profile and executes get-kubeconfig + remote-prepare-local-e2e-run executes prepare-local-e2e on the remote host + get-kubeconfig copies remote minikube kubeconfig locally to ~/.operator-dev/s390-host.kubeconfig + tunnel [args] creates ssh session with tunneling to all API servers + ssh [args] creates ssh session passing optional arguments to ssh + cmd [command with args] execute command as if being on s390x host + upload-my-ssh-private-key uploads your ssh keys (~/.ssh/id_rsa) to s390x host + help this message + +EXAMPLES: + export S390_HOST_NAME=user@ibmz8 + minikube_host.sh tunnel + minikube_host.sh cmd 'make e2e test=replica_set' +" +} + +case ${cmd} in +configure) configure "$@" ;; +recreate-minikube-cluster) recreate-minikube-cluster "$@" ;; +get-kubeconfig) get-kubeconfig ;; +remote-prepare-local-e2e-run) remote-prepare-local-e2e-run ;; +ssh) ssh_to_host "$@" ;; +tunnel) retry_with_sleep tunnel "$@" ;; +sync) sync ;; +cmd) cmd "$@" ;; +upload-my-ssh-private-key) upload-my-ssh-private-key ;; +help) usage ;; +*) usage ;; +esac diff --git a/scripts/minikube/setup_minikube_host.sh b/scripts/minikube/setup_minikube_host.sh new file mode 100755 index 000000000..7628c388c --- /dev/null +++ b/scripts/minikube/setup_minikube_host.sh @@ -0,0 +1,115 @@ +#!/usr/bin/env bash + +# this script downloads necessary tooling for alternative architectures (s390x, ppc64le) using minikube (similar to setup_evg_host.sh) + +set -Eeou pipefail + +check_disk_space() { + echo "Checking available disk space..." + local available_gb + available_gb=$(df / | awk 'NR==2 {print int($4/1024/1024)}') + + if [[ $available_gb -lt 5 ]]; then + echo "ERROR: Insufficient disk space. 
Available: ${available_gb}GB, Required: 5GB minimum" + echo "Please clean up disk space before continuing:" + echo " sudo dnf clean all" + echo " sudo rm -rf /var/cache/dnf/* /tmp/* /var/tmp/*" + echo " docker system prune -af" + return 1 + fi + + echo "Disk space check passed: ${available_gb}GB available" +} + +set_limits() { + echo "Increasing fs.inotify.max_user_instances" + sudo sysctl -w fs.inotify.max_user_instances=8192 + + echo "Increasing fs.inotify.max_user_watches" + sudo sysctl -w fs.inotify.max_user_watches=10485760 + + echo "Increasing the number of open files" + nofile_max=$(cat /proc/sys/fs/nr_open) + nproc_max=$(ulimit -u) + sudo tee -a /etc/security/limits.conf <> ~/.bashrc +} + +check_disk_space +set_limits +setup_group + +download_minikube & +download_docker & + +wait + +echo "Setting up minikube environment variables..." +echo 'export KUBE_ENVIRONMENT_NAME=minikube' >> ~/.bashrc +echo 'export MINIKUBE_PROFILE=${MINIKUBE_PROFILE:-mongodb-e2e}' >> ~/.bashrc +echo 'export KUBECONFIG=$(minikube kubeconfig --profile=${MINIKUBE_PROFILE:-mongodb-e2e} 2>/dev/null || echo ~/.kube/config)' >> ~/.bashrc + +echo "Minikube host setup completed successfully for ${ARCH}!" +echo "SETUP_GROUP: ${SETUP_GROUP}" From b9ceed6997870306c50138e8e082b6da02794857 Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Tue, 29 Jul 2025 16:56:14 +0200 Subject: [PATCH 008/164] Agent dockerfiles --- docker/mongodb-agent-non-matrix/Dockerfile | 70 ++++++++++++++-------- docker/mongodb-agent/Dockerfile | 65 ++++++++------------ 2 files changed, 69 insertions(+), 66 deletions(-) diff --git a/docker/mongodb-agent-non-matrix/Dockerfile b/docker/mongodb-agent-non-matrix/Dockerfile index 0677126fd..1e2f1dc89 100644 --- a/docker/mongodb-agent-non-matrix/Dockerfile +++ b/docker/mongodb-agent-non-matrix/Dockerfile @@ -1,26 +1,41 @@ -FROM scratch AS base +FROM scratch AS tools_downloader -ARG agent_version -ARG agent_distro -ARG tools_version -ARG tools_distro +ARG mongodb_tools_url -ADD https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-${agent_version}.${agent_distro}.tar.gz /data/mongodb-agent.tar.gz -ADD https://downloads.mongodb.org/tools/db/mongodb-database-tools-${tools_distro}-${tools_version}.tgz /data/mongodb-tools.tgz +ARG mongodb_tools_version_amd64 +ADD "${mongodb_tools_url}/${mongodb_tools_version_amd64}" /data/amd64/mongodb_tools.tgz -COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/LICENSE +ARG mongodb_tools_version_arm64 +ADD "${mongodb_tools_url}/${mongodb_tools_version_arm64}" /data/arm64/mongodb_tools.tgz -FROM registry.access.redhat.com/ubi9/ubi-minimal +ARG mongodb_tools_version_s390x +ADD "${mongodb_tools_url}/${mongodb_tools_version_s390x}" /data/s390x/mongodb_tools.tgz -ARG version +ARG mongodb_tools_version_ppc64le +ADD "${mongodb_tools_url}/${mongodb_tools_version_ppc64le}" /data/ppc64le/mongodb_tools.tgz -LABEL name="MongoDB Agent" \ - version="${version}" \ - summary="MongoDB Agent" \ - description="MongoDB Agent" \ - vendor="MongoDB" \ - release="1" \ - maintainer="support@mongodb.com" +FROM scratch AS agent_downloader + +ARG mongodb_agent_url + +ARG mongodb_agent_version_amd64 +ADD "${mongodb_agent_url}/${mongodb_agent_version_amd64}" /data/amd64/mongodb_agent.tgz + +ARG mongodb_agent_version_arm64 +ADD "${mongodb_agent_url}/${mongodb_agent_version_arm64}" /data/arm64/mongodb_agent.tgz + +ARG mongodb_agent_version_s390x +ADD "${mongodb_agent_url}/${mongodb_agent_version_s390x}" 
/data/s390x/mongodb_agent.tgz + +ARG mongodb_agent_version_ppc64le +ADD "${mongodb_agent_url}/${mongodb_agent_version_ppc64le}" /data/ppc64le/mongodb_agent.tgz + +FROM registry.access.redhat.com/ubi9/ubi-minimal + +ARG TARGETARCH +COPY --from=tools_downloader "/data/${TARGETARCH}/mongodb_tools.tgz" /tools/mongodb_tools.tgz +COPY --from=agent_downloader "/data/${TARGETARCH}/mongodb_agent.tgz" /agent/mongodb_agent.tgz +COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/LICENSE # Replace libcurl-minimal and curl-minimal with the full versions # https://bugzilla.redhat.com/show_bug.cgi?id=1994521 @@ -50,20 +65,25 @@ RUN mkdir -p /agent \ && touch /var/log/mongodb-mms-automation/readiness.log \ && chmod ugo+rw /var/log/mongodb-mms-automation/readiness.log - -COPY --from=base /data/mongodb-agent.tar.gz /agent -COPY --from=base /data/mongodb-tools.tgz /agent -COPY --from=base /data/LICENSE /licenses/LICENSE - -RUN tar xfz /agent/mongodb-agent.tar.gz \ +RUN tar xfz /agent/mongodb_agent.tgz \ && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ && chmod +x /agent/mongodb-agent \ && mkdir -p /var/lib/automation/config \ && chmod -R +r /var/lib/automation/config \ - && rm /agent/mongodb-agent.tar.gz \ + && rm /agent/mongodb_agent.tgz \ && rm -r mongodb-mms-automation-agent-* -RUN tar xfz /agent/mongodb-tools.tgz --directory /var/lib/mongodb-mms-automation/ && rm /agent/mongodb-tools.tgz +RUN tar xfz /tools/mongodb_tools.tgz --directory /var/lib/mongodb-mms-automation/ && rm /tools/mongodb_tools.tgz + +ARG version + +LABEL name="MongoDB Agent" \ + version="${version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" USER 2000 CMD ["/agent/mongodb-agent", "-cluster=/var/lib/automation/config/automation-config.json"] diff --git a/docker/mongodb-agent/Dockerfile b/docker/mongodb-agent/Dockerfile index a9a910e56..b58933002 100644 --- a/docker/mongodb-agent/Dockerfile +++ b/docker/mongodb-agent/Dockerfile @@ -1,8 +1,9 @@ # the init database image gets supplied by pipeline.py and corresponds to the operator version we want to release # the agent with. This enables us to release the agent for older operator. 
ARG init_database_image +FROM ${init_database_image} AS init_database -FROM --platform=${BUILDPLATFORM} scratch AS tools_downloader +FROM scratch AS tools_downloader ARG mongodb_tools_url @@ -18,7 +19,7 @@ ADD "${mongodb_tools_url}/${mongodb_tools_version_s390x}" /data/s390x/mongodb_to ARG mongodb_tools_version_ppc64le ADD "${mongodb_tools_url}/${mongodb_tools_version_ppc64le}" /data/ppc64le/mongodb_tools.tgz -FROM --platform=${BUILDPLATFORM} scratch AS agent_downloader +FROM scratch AS agent_downloader ARG mongodb_agent_url @@ -34,10 +35,7 @@ ADD "${mongodb_agent_url}/${mongodb_agent_version_s390x}" /data/s390x/mongodb_ag ARG mongodb_agent_version_ppc64le ADD "${mongodb_agent_url}/${mongodb_agent_version_ppc64le}" /data/ppc64le/mongodb_agent.tgz - -FROM ${init_database_image} AS init_database - -FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS dependency_downloader +FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes/ @@ -45,46 +43,26 @@ COPY go.mod go.sum ./ RUN go mod download -FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder +COPY mongodb-community-operator /go/src/github.com/mongodb/mongodb-kubernetes/mongodb-community-operator -WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes/ ARG TARGETOS ARG TARGETARCH - -COPY --from=dependency_downloader /go/pkg /go/pkg -COPY . /go/src/github.com/mongodb/mongodb-kubernetes - RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go -FROM scratch AS base - -COPY --from=readiness_builder /readinessprobe /data/ -COPY --from=readiness_builder /version-upgrade-hook /data/ - -COPY --from=init_database /probes/probe.sh /data/probe.sh -COPY --from=init_database /scripts/agent-launcher-lib.sh /data/ -COPY --from=init_database /scripts/agent-launcher.sh /data/ -COPY --from=init_database /licenses/LICENSE /data/ - FROM registry.access.redhat.com/ubi9/ubi-minimal -ARG version +ARG TARGETARCH +COPY --from=tools_downloader "/data/${TARGETARCH}/mongodb_tools.tgz" /tools/mongodb_tools.tgz +COPY --from=agent_downloader "/data/${TARGETARCH}/mongodb_agent.tgz" /agent/mongodb_agent.tgz -LABEL name="MongoDB Agent" \ - version="${version}" \ - summary="MongoDB Agent" \ - description="MongoDB Agent" \ - vendor="MongoDB" \ - release="1" \ - maintainer="support@mongodb.com" +COPY --from=readiness_builder /readinessprobe /opt/scripts/readinessprobe +COPY --from=readiness_builder /version-upgrade-hook /opt/scripts/version-upgrade-hook -COPY --from=base /data/probe.sh /opt/scripts/probe.sh -COPY --from=base /data/readinessprobe /opt/scripts/readinessprobe -COPY --from=base /data/version-upgrade-hook /opt/scripts/version-upgrade-hook -COPY --from=base /data/agent-launcher-lib.sh /opt/scripts/agent-launcher-lib.sh -COPY --from=base /data/agent-launcher.sh /opt/scripts/agent-launcher.sh -COPY --from=base /data/LICENSE /licenses/LICENSE +COPY --from=init_database /probes/probe.sh /opt/scripts/probe.sh +COPY --from=init_database /scripts/agent-launcher-lib.sh /opt/scripts/agent-launcher-lib.sh +COPY --from=init_database /scripts/agent-launcher.sh /opt/scripts/agent-launcher.sh +COPY --from=init_database /licenses/LICENSE /licenses/LICENSE # 
Replace libcurl-minimal and curl-minimal with the full versions # https://bugzilla.redhat.com/show_bug.cgi?id=1994521 @@ -106,11 +84,6 @@ RUN microdnf install -y --disableplugin=subscription-manager \ && microdnf upgrade -y \ && rm -rf /var/lib/apt/lists/* - -ARG TARGETARCH -COPY --from=tools_downloader "/data/${TARGETARCH}/mongodb_tools.tgz" /tools/mongodb_tools.tgz -COPY --from=agent_downloader "/data/${TARGETARCH}/mongodb_agent.tgz" /agent/mongodb_agent.tgz - RUN tar xfz /tools/mongodb_tools.tgz RUN mv mongodb-database-tools-*/bin/* /tools RUN chmod +x /tools/* @@ -126,6 +99,16 @@ RUN rm -rf mongodb-mms-automation-agent-* RUN mkdir -p /var/lib/automation/config RUN chmod -R +r /var/lib/automation/config +ARG version + +LABEL name="MongoDB Agent" \ + version="${version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" + USER 2000 HEALTHCHECK --timeout=30s CMD ls /opt/scripts/readinessprobe || exit 1 From 389b2b1bf73b945d2007154524c34c906cf905a7 Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Tue, 29 Jul 2025 16:56:28 +0200 Subject: [PATCH 009/164] Optimize readiness and upgrade hook --- docker/mongodb-kubernetes-readinessprobe/Dockerfile | 11 ++++++++--- docker/mongodb-kubernetes-upgrade-hook/Dockerfile | 9 +++++++-- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/docker/mongodb-kubernetes-readinessprobe/Dockerfile b/docker/mongodb-kubernetes-readinessprobe/Dockerfile index d3fcb0a8e..55d661438 100644 --- a/docker/mongodb-kubernetes-readinessprobe/Dockerfile +++ b/docker/mongodb-kubernetes-readinessprobe/Dockerfile @@ -1,7 +1,12 @@ -FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS builder +FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS builder -WORKDIR /go/src -ADD . . +WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes/ + +COPY go.mod go.sum ./ + +RUN go mod download + +COPY mongodb-community-operator /go/src/github.com/mongodb/mongodb-kubernetes/mongodb-community-operator ARG TARGETOS ARG TARGETARCH diff --git a/docker/mongodb-kubernetes-upgrade-hook/Dockerfile b/docker/mongodb-kubernetes-upgrade-hook/Dockerfile index 90455d85d..fab594d5e 100644 --- a/docker/mongodb-kubernetes-upgrade-hook/Dockerfile +++ b/docker/mongodb-kubernetes-upgrade-hook/Dockerfile @@ -1,7 +1,12 @@ FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS builder -WORKDIR /go/src -ADD . . 
+WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes/ + +COPY go.mod go.sum ./ + +RUN go mod download + +COPY mongodb-community-operator /go/src/github.com/mongodb/mongodb-kubernetes/mongodb-community-operator ARG TARGETARCH ARG TARGETOS From 664998746d0ef981b05348d1a51b201cc9313dfa Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Thu, 12 Jun 2025 17:22:16 +0200 Subject: [PATCH 010/164] Re-design pipeline Fix build scenario Remove create and push manifests Continue improvement to main Simplify main and build_context missed Pass Build Configuration object directly Use legacy and new pipeline Fix Remove --include Rename MCO test image Multi platform builds, with buildx TODOs Implement is_release_step_executed() Fix init appdb image Import sort black formatting Some cleaning and version adjustments Adapt main to new build config Add buildscenario to buildconfig Handle build env Renaming, usage of high level config All images build pass on EVG Lint Explicit image type, support custom build_path Replace old by new pipeline in EVG Add documentation Split in multiple files, cleanup WIP, passing builds on staging temp + multi arch manifests Replace usage of sonar Remove namespace Remove pin_at and build_id Copied pipeline, removed daily builds and --exclude --- .evergreen-functions.yml | 38 +- .evergreen-periodic-builds.yaml | 28 +- .evergreen.yml | 20 +- docker/mongodb-kubernetes-tests/release.json | 253 ++++++ scripts/release/atomic_pipeline.py | 856 +++++++++++++++++++ scripts/release/build_configuration.py | 21 + scripts/release/build_context.py | 81 ++ scripts/release/build_images.py | 173 ++++ scripts/release/main.py | 203 +++++ scripts/release/optimized_operator_build.py | 87 ++ 10 files changed, 1735 insertions(+), 25 deletions(-) create mode 100644 docker/mongodb-kubernetes-tests/release.json create mode 100755 scripts/release/atomic_pipeline.py create mode 100644 scripts/release/build_configuration.py create mode 100644 scripts/release/build_context.py create mode 100644 scripts/release/build_images.py create mode 100644 scripts/release/main.py create mode 100644 scripts/release/optimized_operator_build.py diff --git a/.evergreen-functions.yml b/.evergreen-functions.yml index c004dd098..a1d2a5539 100644 --- a/.evergreen-functions.yml +++ b/.evergreen-functions.yml @@ -538,7 +538,43 @@ functions: shell: bash <<: *e2e_include_expansions_in_env working_dir: src/github.com/mongodb/mongodb-kubernetes - binary: scripts/evergreen/run_python.sh pipeline.py --include ${image_name} --parallel --sign + binary: scripts/evergreen/run_python.sh scripts/release/main.py --parallel ${image_name} + + legacy_pipeline: + - *switch_context + - command: shell.exec + type: setup + params: + shell: bash + script: | + # Docker Hub workaround + # docker buildx needs the moby/buildkit image when setting up a builder so we pull it from our mirror + docker buildx create --driver=docker-container --driver-opt=image=268558157000.dkr.ecr.eu-west-1.amazonaws.com/docker-hub-mirrors/moby/buildkit:buildx-stable-1 --use + docker buildx inspect --bootstrap + - command: ec2.assume_role + display_name: Assume IAM role with permissions to pull Kondukto API token + params: + role_arn: ${kondukto_role_arn} + - command: shell.exec + display_name: Pull Kondukto API token from AWS Secrets Manager and write it to file + params: + silent: true + shell: bash + include_expansions_in_env: [AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN] + script: | + set -e + # use AWS CLI to get the Kondukto API token from AWS Secrets 
Manager + kondukto_token=$(aws secretsmanager get-secret-value --secret-id "kondukto-token" --region "us-east-1" --query 'SecretString' --output text) + # write the KONDUKTO_TOKEN environment variable to Silkbomb environment file + echo "KONDUKTO_TOKEN=$kondukto_token" > ${workdir}/silkbomb.env + - command: subprocess.exec + retry_on_failure: true + type: setup + params: + shell: bash + <<: *e2e_include_expansions_in_env + working_dir: src/github.com/mongodb/mongodb-kubernetes + binary: scripts/evergreen/run_python.sh pipeline.py --parallel ${image_name} --sign teardown_cloud_qa_all: - *switch_context diff --git a/.evergreen-periodic-builds.yaml b/.evergreen-periodic-builds.yaml index 82f7e7e77..c9b9d4a0d 100644 --- a/.evergreen-periodic-builds.yaml +++ b/.evergreen-periodic-builds.yaml @@ -21,7 +21,7 @@ variables: tasks: - name: periodic_build_operator commands: - - func: pipeline + - func: legacy_pipeline vars: image_name: operator-daily @@ -35,49 +35,49 @@ tasks: - name: periodic_build_init_appdb commands: - - func: pipeline + - func: legacy_pipeline vars: image_name: init-appdb-daily - name: periodic_build_init_database commands: - - func: pipeline + - func: legacy_pipeline vars: image_name: init-database-daily - name: periodic_build_init_opsmanager commands: - - func: pipeline + - func: legacy_pipeline vars: image_name: init-ops-manager-daily - name: periodic_build_database commands: - - func: pipeline + - func: legacy_pipeline vars: image_name: database-daily - name: periodic_build_sbom_cli commands: - - func: pipeline + - func: legacy_pipeline vars: image_name: cli - name: periodic_build_ops_manager_6 commands: - - func: pipeline + - func: legacy_pipeline vars: image_name: ops-manager-6-daily - name: periodic_build_ops_manager_7 commands: - - func: pipeline + - func: legacy_pipeline vars: image_name: ops-manager-7-daily - name: periodic_build_ops_manager_8 commands: - - func: pipeline + - func: legacy_pipeline vars: image_name: ops-manager-8-daily @@ -91,7 +91,7 @@ tasks: exec_timeout_secs: 43200 commands: - func: enable_QEMU - - func: pipeline + - func: legacy_pipeline vars: image_name: mongodb-agent-daily @@ -99,7 +99,7 @@ tasks: exec_timeout_secs: 43200 commands: - func: enable_QEMU - - func: pipeline + - func: legacy_pipeline vars: image_name: mongodb-agent-1-daily @@ -123,19 +123,19 @@ tasks: - name: periodic_build_community_operator commands: - func: enable_QEMU - - func: pipeline + - func: legacy_pipeline vars: image_name: mongodb-kubernetes-operator-daily - name: periodic_build_readiness_probe commands: - - func: pipeline + - func: legacy_pipeline vars: image_name: readinessprobe-daily - name: periodic_build_version_upgrade_post_start_hook commands: - - func: pipeline + - func: legacy_pipeline vars: image_name: operator-version-upgrade-post-start-hook-daily diff --git a/.evergreen.yml b/.evergreen.yml index 209bf152a..17d6cd5fe 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -283,7 +283,7 @@ tasks: - func: setup_building_host - func: quay_login - func: setup_docker_sbom - - func: pipeline + - func: legacy_pipeline vars: image_name: operator include_tags: release @@ -297,7 +297,7 @@ tasks: - func: setup_building_host - func: quay_login - func: setup_docker_sbom - - func: pipeline + - func: legacy_pipeline vars: image_name: init-appdb include_tags: release @@ -310,7 +310,7 @@ tasks: - func: setup_building_host - func: quay_login - func: setup_docker_sbom - - func: pipeline + - func: legacy_pipeline vars: image_name: init-database include_tags: release @@ -323,7 +323,7 @@ 
tasks: - func: setup_building_host - func: quay_login - func: setup_docker_sbom - - func: pipeline + - func: legacy_pipeline vars: image_name: init-ops-manager include_tags: release @@ -336,7 +336,7 @@ tasks: - func: setup_building_host - func: quay_login - func: setup_docker_sbom - - func: pipeline + - func: legacy_pipeline vars: image_name: agent include_tags: release @@ -350,7 +350,7 @@ tasks: - func: setup_building_host - func: quay_login - func: setup_docker_sbom - - func: pipeline + - func: legacy_pipeline vars: image_name: agent-pct include_tags: release @@ -395,7 +395,7 @@ tasks: commands: - func: clone - func: setup_building_host - - func: pipeline + - func: legacy_pipeline vars: image_name: agent-pct skip_tags: release @@ -410,7 +410,7 @@ tasks: commands: - func: clone - func: setup_building_host - - func: pipeline + - func: legacy_pipeline vars: image_name: agent-pct skip_tags: release @@ -554,7 +554,7 @@ tasks: - func: setup_building_host - func: quay_login - func: setup_docker_sbom - - func: pipeline + - func: legacy_pipeline vars: image_name: database @@ -573,7 +573,7 @@ tasks: - func: setup_building_host - func: quay_login - func: setup_docker_sbom - - func: pipeline + - func: legacy_pipeline vars: image_name: ops-manager include_tags: release diff --git a/docker/mongodb-kubernetes-tests/release.json b/docker/mongodb-kubernetes-tests/release.json new file mode 100644 index 000000000..4fdb45ec1 --- /dev/null +++ b/docker/mongodb-kubernetes-tests/release.json @@ -0,0 +1,253 @@ +{ + "mongodbToolsBundle": { + "ubi": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz" + }, + "mongodbOperator": "1.1.0", + "initDatabaseVersion": "1.1.0", + "initOpsManagerVersion": "1.1.0", + "initAppDbVersion": "1.1.0", + "databaseImageVersion": "1.1.0", + "agentVersion": "108.0.2.8729-1", + "openshift": { + "minimumSupportedVersion": "4.6" + }, + "search": { + "community": { + "version": "1.47.0" + } + }, + "supportedImages": { + "readinessprobe": { + "ssdlc_name": "MongoDB Controllers for Kubernetes Readiness Probe", + "versions": [ + "1.0.22" + ], + "variants": [ + "ubi" + ] + }, + "operator-version-upgrade-post-start-hook": { + "ssdlc_name": "MongoDB Controllers for Kubernetes Operator Version Upgrade Hook", + "versions": [ + "1.0.9" + ], + "variants": [ + "ubi" + ] + }, + "ops-manager": { + "ssdlc_name": "MongoDB Controllers for Kubernetes Enterprise Ops Manager", + "versions": [ + "6.0.25", + "6.0.26", + "6.0.27", + "7.0.12", + "7.0.13", + "7.0.14", + "7.0.15", + "8.0.5", + "8.0.6", + "8.0.7" + ], + "variants": [ + "ubi" + ] + }, + "mongodb-kubernetes": { + "Description": "We support 3 last versions, see https://wiki.corp.mongodb.com/display/MMS/Kubernetes+Operator+Support+Policy", + "ssdlc_name": "MongoDB Controllers for Kubernetes Operator", + "versions": [ + "1.0.0", + "1.0.1", + "1.1.0" + ], + "variants": [ + "ubi" + ] + }, + "mongodb-kubernetes-operator": { + "Description": "Community Operator daily rebuilds", + "ssdlc_name": "MongoDB Community Operator", + "versions": [ + "0.12.0", + "0.11.0", + "0.10.0", + "0.9.0", + "0.8.3", + "0.8.2", + "0.8.1", + "0.8.0", + "0.7.9", + "0.7.8", + "0.7.7", + "0.7.6" + ], + "variants": [ + "ubi" + ] + }, + "mongodb-agent": { + "Description": "Agents corresponding to OpsManager 5.x and 6.x series", + "ssdlc_name": "MongoDB Controllers for Kubernetes MongoDB Agent", + "Description for specific versions": { + "11.0.5.6963-1": "An upgraded version for OM 5.0 we use for Operator-only deployments", + "12.0.28.7763-1": "OM 6 basic version" + }, + "versions": 
[ + "108.0.2.8729-1" + ], + "opsManagerMapping": { + "Description": "These are the agents from which we start supporting static containers.", + "cloud_manager": "13.35.0.9498-1", + "cloud_manager_tools": "100.12.1", + "ops_manager": { + "6.0.25": { + "agent_version": "12.0.33.7866-1", + "tools_version": "100.10.0" + }, + "6.0.26": { + "agent_version": "12.0.34.7888-1", + "tools_version": "100.10.0" + }, + "6.0.27": { + "agent_version": "12.0.35.7911-1", + "tools_version": "100.10.0" + }, + "7.0.13": { + "agent_version": "107.0.13.8702-1", + "tools_version": "100.10.0" + }, + "7.0.14": { + "agent_version": "107.0.13.8702-1", + "tools_version": "100.10.0" + }, + "7.0.15": { + "agent_version": "107.0.15.8741-1", + "tools_version": "100.11.0" + }, + "8.0.5": { + "agent_version": "108.0.4.8770-1", + "tools_version": "100.11.0" + }, + "8.0.6": { + "agent_version": "108.0.6.8796-1", + "tools_version": "100.11.0" + }, + "8.0.7": { + "agent_version": "108.0.7.8810-1", + "tools_version": "100.12.0" + } + } + }, + "variants": [ + "ubi" + ] + }, + "init-ops-manager": { + "Description": "The lowest version corresponds to the lowest supported Operator version, see https://wiki.corp.mongodb.com/display/MMS/Kubernetes+Operator+Support+Policy", + "ssdlc_name": "MongoDB Controllers for Kubernetes Init Ops Manager", + "versions": [ + "1.0.0", + "1.0.1", + "1.1.0" + ], + "variants": [ + "ubi" + ] + }, + "init-database": { + "Description": "The lowest version corresponds to the lowest supported Operator version, see https://wiki.corp.mongodb.com/display/MMS/Kubernetes+Operator+Support+Policy", + "ssdlc_name": "MongoDB Controllers for Kubernetes Init Database", + "versions": [ + "1.0.0", + "1.0.1", + "1.1.0" + ], + "variants": [ + "ubi" + ] + }, + "init-appdb": { + "Description": "The lowest version corresponds to the lowest supported Operator version, see https://wiki.corp.mongodb.com/display/MMS/Kubernetes+Operator+Support+Policy", + "ssdlc_name": "MongoDB Controllers for Kubernetes Init AppDB", + "versions": [ + "1.0.0", + "1.0.1", + "1.1.0" + ], + "variants": [ + "ubi" + ] + }, + "database": { + "Description": "The lowest version corresponds to the lowest supported Operator version, see https://wiki.corp.mongodb.com/display/MMS/Kubernetes+Operator+Support+Policy", + "ssdlc_name": "MongoDB Controllers for Kubernetes Database", + "versions": [ + "1.0.0", + "1.0.1", + "1.1.0" + ], + "variants": [ + "ubi" + ] + }, + "mongodb-enterprise-server": { + "Description": "The lowest version corresponds to the lowest supported Operator version, see https://wiki.corp.mongodb.com/display/MMS/Kubernetes+Operator+Support+Policy", + "ssdlc_name": "MongoDB Enterprise Server", + "versions": [ + "4.4.0-ubi8", + "4.4.1-ubi8", + "4.4.2-ubi8", + "4.4.3-ubi8", + "4.4.4-ubi8", + "4.4.5-ubi8", + "4.4.6-ubi8", + "4.4.7-ubi8", + "4.4.8-ubi8", + "4.4.9-ubi8", + "4.4.10-ubi8", + "4.4.11-ubi8", + "4.4.12-ubi8", + "4.4.13-ubi8", + "4.4.14-ubi8", + "4.4.15-ubi8", + "4.4.16-ubi8", + "4.4.17-ubi8", + "4.4.18-ubi8", + "4.4.19-ubi8", + "4.4.20-ubi8", + "4.4.21-ubi8", + "5.0.0-ubi8", + "5.0.1-ubi8", + "5.0.2-ubi8", + "5.0.3-ubi8", + "5.0.4-ubi8", + "5.0.5-ubi8", + "5.0.6-ubi8", + "5.0.7-ubi8", + "5.0.8-ubi8", + "5.0.9-ubi8", + "5.0.10-ubi8", + "5.0.11-ubi8", + "5.0.12-ubi8", + "5.0.13-ubi8", + "5.0.14-ubi8", + "5.0.15-ubi8", + "5.0.16-ubi8", + "5.0.17-ubi8", + "5.0.18-ubi8", + "6.0.0-ubi8", + "6.0.1-ubi8", + "6.0.2-ubi8", + "6.0.3-ubi8", + "6.0.4-ubi8", + "6.0.5-ubi8", + "8.0.0-ubi8", + "8.0.0-ubi9" + ], + "variants": [ + "ubi" + ] + } + } +} 
diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py
new file mode 100755
index 000000000..915f7f086
--- /dev/null
+++ b/scripts/release/atomic_pipeline.py
@@ -0,0 +1,856 @@
+#!/usr/bin/env python3
+
+"""This pipeline script knows about the details of our Docker images
+and where to fetch and calculate parameters. It uses build_images.py
+to produce the final images."""
+import json
+import os
+import shutil
+from concurrent.futures import ProcessPoolExecutor
+from queue import Queue
+from typing import Callable, Dict, List, Optional, Tuple, Union
+
+import requests
+import semver
+from opentelemetry import trace
+from packaging.version import Version
+
+from lib.base_logger import logger
+from scripts.evergreen.release.agent_matrix import (
+    get_supported_operator_versions,
+)
+from scripts.evergreen.release.images_signing import (
+    mongodb_artifactory_login,
+    sign_image,
+    verify_signature,
+)
+from scripts.evergreen.release.sbom import generate_sbom, generate_sbom_for_cli
+
+from .build_configuration import BuildConfiguration
+from .build_context import BuildScenario
+from .build_images import process_image
+from .optimized_operator_build import build_operator_image_fast
+
+TRACER = trace.get_tracer("evergreen-agent")
+DEFAULT_NAMESPACE = "default"
+
+# TODO: rename architecture -> platform everywhere
+
+def make_list_of_str(value: Union[None, str, List[str]]) -> List[str]:
+    if value is None:
+        return []
+
+    if isinstance(value, str):
+        return [e.strip() for e in value.split(",")]
+
+    return value
+
+
+def get_tools_distro(tools_version: str) -> Dict[str, str]:
+    new_rhel_tool_version = "100.10.0"
+    default_distro = {"arm": "rhel90-aarch64", "amd": "rhel90-x86_64"}
+    if Version(tools_version) >= Version(new_rhel_tool_version):
+        return {"arm": "rhel93-aarch64", "amd": "rhel93-x86_64"}
+    return default_distro
+
+
+def is_running_in_evg_pipeline():
+    return os.getenv("RUNNING_IN_EVG", "") == "true"
+
+
+def is_running_in_patch():
+    is_patch = os.environ.get("is_patch")
+    return is_patch is not None and is_patch.lower() == "true"
+
+
+def load_release_file() -> Dict:
+    with open("release.json") as release:
+        return json.load(release)
+
+
+@TRACER.start_as_current_span("sonar_build_image")
+def pipeline_process_image(
+    image_name: str,
+    dockerfile_path: str,
+    build_configuration: BuildConfiguration,
+    dockerfile_args: Dict[str, str] = None,
+    build_path: str = ".",
+    with_sbom: bool = True,
+):
+    """Builds a Docker image with arguments defined in `dockerfile_args`."""
+    span = trace.get_current_span()
+    span.set_attribute("mck.image_name", image_name)
+    if dockerfile_args:
+        span.set_attribute("mck.build_args", str(dockerfile_args))
+
+    # TODO use these?
+    build_options = {
+        # Will continue building an image if it finds an error. See next comment. 
+ "continue_on_errors": True, + # But will still fail after all the tasks have completed + "fail_on_errors": True, + } + + logger.info(f"Dockerfile args: {dockerfile_args}, for image: {image_name}") + + if not dockerfile_args: + dockerfile_args = {} + logger.debug(f"Build args: {dockerfile_args}") + process_image( + image_name, + image_tag=build_configuration.version, + dockerfile_path=dockerfile_path, + dockerfile_args=dockerfile_args, + base_registry=build_configuration.base_registry, + platforms=build_configuration.platforms, + sign=build_configuration.sign, + build_path=build_path, + ) + + if with_sbom: + produce_sbom(dockerfile_args) + + +@TRACER.start_as_current_span("produce_sbom") +def produce_sbom(args): + span = trace.get_current_span() + if not is_running_in_evg_pipeline(): + logger.info("Skipping SBOM Generation (enabled only for EVG)") + return + + try: + image_pull_spec = args["quay_registry"] + args.get("ubi_suffix", "") + except KeyError: + logger.error(f"Could not find image pull spec. Args: {args}") + logger.error(f"Skipping SBOM generation") + return + + try: + image_tag = args["release_version"] + span.set_attribute("mck.release_version", image_tag) + except KeyError: + logger.error(f"Could not find image tag. Args: {args}") + logger.error(f"Skipping SBOM generation") + return + + image_pull_spec = f"{image_pull_spec}:{image_tag}" + print(f"Producing SBOM for image: {image_pull_spec} args: {args}") + + platform = "linux/amd64" + if "platform" in args: + if args["platform"] == "arm64": + platform = "linux/arm64" + elif args["platform"] == "amd64": + platform = "linux/amd64" + else: + # TODO: return here? + logger.error(f"Unrecognized architectures in {args}. Skipping SBOM generation") + + generate_sbom(image_pull_spec, platform) + + +def build_tests_image(build_configuration: BuildConfiguration): + """ + Builds image used to run tests. + """ + image_name = "mongodb-kubernetes-tests" + + # helm directory needs to be copied over to the tests docker context. + helm_src = "helm_chart" + helm_dest = "docker/mongodb-kubernetes-tests/helm_chart" + requirements_dest = "docker/mongodb-kubernetes-tests/requirements.txt" + public_src = "public" + public_dest = "docker/mongodb-kubernetes-tests/public" + + # Remove existing directories/files if they exist + shutil.rmtree(helm_dest, ignore_errors=True) + shutil.rmtree(public_dest, ignore_errors=True) + + # Copy directories and files (recursive copy) + shutil.copytree(helm_src, helm_dest) + shutil.copytree(public_src, public_dest) + shutil.copyfile("release.json", "docker/mongodb-kubernetes-tests/release.json") + shutil.copyfile("requirements.txt", requirements_dest) + + python_version = os.getenv("PYTHON_VERSION", "3.11") + if python_version == "": + raise Exception("Missing PYTHON_VERSION environment variable") + + buildargs = dict({"PYTHON_VERSION": python_version}) + + pipeline_process_image( + image_name, + dockerfile_path="Dockerfile", + build_configuration=build_configuration, + dockerfile_args=buildargs, + build_path="docker/mongodb-kubernetes-tests", + ) + + +def build_mco_tests_image(build_configuration: BuildConfiguration): + """ + Builds image used to run community tests. 
+ """ + image_name = "mongodb-community-tests" + golang_version = os.getenv("GOLANG_VERSION", "1.24") + if golang_version == "": + raise Exception("Missing GOLANG_VERSION environment variable") + + buildargs = dict({"GOLANG_VERSION": golang_version}) + + pipeline_process_image( + image_name, + dockerfile_path="docker/mongodb-community-tests/Dockerfile", + build_configuration=build_configuration, + dockerfile_args=buildargs, + ) + + +def build_operator_image(build_configuration: BuildConfiguration): + """Calculates arguments required to build the operator image, and starts the build process.""" + # In evergreen, we can pass test_suffix env to publish the operator to a quay + # repository with a given suffix. + test_suffix = os.environ.get("test_suffix", "") + log_automation_config_diff = os.environ.get("LOG_AUTOMATION_CONFIG_DIFF", "false") + + args = { + "version": build_configuration.version, + "log_automation_config_diff": log_automation_config_diff, + "test_suffix": test_suffix, + "debug": build_configuration.debug, + } + + logger.info(f"Building Operator args: {args}") + + image_name = "mongodb-kubernetes" + build_image_generic( + image_name=image_name, + dockerfile_path="docker/mongodb-kubernetes-operator/Dockerfile", + build_configuration=build_configuration, + extra_args=args, + ) + + +def build_operator_image_patch(build_configuration: BuildConfiguration): + if not build_operator_image_fast(build_configuration): + build_operator_image(build_configuration) + + +def build_database_image(build_configuration: BuildConfiguration): + """ + Builds a new database image. + """ + release = load_release_file() + version = release["databaseImageVersion"] + args = {"version": build_configuration.version} + build_image_generic( + image_name="mongodb-kubernetes-database", + dockerfile_path="docker/mongodb-kubernetes-database/Dockerfile", + build_configuration=build_configuration, + extra_args=args, + ) + + +def build_CLI_SBOM(build_configuration: BuildConfiguration): + if not is_running_in_evg_pipeline(): + logger.info("Skipping SBOM Generation (enabled only for EVG)") + return + + if build_configuration.platforms is None or len(build_configuration.platforms) == 0: + architectures = ["linux/amd64", "linux/arm64", "darwin/arm64", "darwin/amd64"] + elif "arm64" in build_configuration.platforms: + architectures = ["linux/arm64", "darwin/arm64"] + elif "amd64" in build_configuration.platforms: + architectures = ["linux/amd64", "darwin/amd64"] + else: + logger.error(f"Unrecognized architectures {build_configuration.platforms}. Skipping SBOM generation") + return + + release = load_release_file() + version = release["mongodbOperator"] + + for architecture in architectures: + generate_sbom_for_cli(version, architecture) + + +def should_skip_arm64(): + """ + Determines if arm64 builds should be skipped based on environment. + Returns True if running in Evergreen pipeline as a patch. 
+ """ + return is_running_in_evg_pipeline() and is_running_in_patch() + + +@TRACER.start_as_current_span("sign_image_in_repositories") +def sign_image_in_repositories(args: Dict[str, str], arch: str = None): + span = trace.get_current_span() + repository = args["quay_registry"] + args["ubi_suffix"] + tag = args["release_version"] + if arch: + tag = f"{tag}-{arch}" + + span.set_attribute("mck.tag", tag) + + sign_image(repository, tag) + verify_signature(repository, tag) + + +def find_om_in_releases(om_version: str, releases: Dict[str, str]) -> Optional[str]: + """ + There are a few alternatives out there that allow for json-path or xpath-type + traversal of Json objects in Python, I don't have time to look for one of + them now but I have to do at some point. + """ + for release in releases: + if release["version"] == om_version: + for platform in release["platform"]: + if platform["package_format"] == "deb" and platform["arch"] == "x86_64": + for package in platform["packages"]["links"]: + if package["name"] == "tar.gz": + return package["download_link"] + return None + + +def get_om_releases() -> Dict[str, str]: + """Returns a dictionary representation of the Json document holdin all the OM + releases. + """ + ops_manager_release_archive = ( + "https://info-mongodb-com.s3.amazonaws.com/com-download-center/ops_manager_release_archive.json" + ) + + return requests.get(ops_manager_release_archive).json() + + +def find_om_url(om_version: str) -> str: + """Gets a download URL for a given version of OM.""" + releases = get_om_releases() + + current_release = find_om_in_releases(om_version, releases["currentReleases"]) + if current_release is None: + current_release = find_om_in_releases(om_version, releases["oldReleases"]) + + if current_release is None: + raise ValueError("Ops Manager version {} could not be found".format(om_version)) + + return current_release + + +def build_init_om_image(build_configuration: BuildConfiguration): + release = load_release_file() + version = release["initOpsManagerVersion"] + args = {"version": build_configuration.version} + build_image_generic( + image_name="mongodb-kubernetes-init-ops-manager", + dockerfile_path="docker/mongodb-kubernetes-init-ops-manager/Dockerfile", + build_configuration=build_configuration, + extra_args=args, + ) + + +def build_om_image(build_configuration: BuildConfiguration): + # Make this a parameter for the Evergreen build + # https://github.com/evergreen-ci/evergreen/wiki/Parameterized-Builds + om_version = os.environ.get("om_version") + if om_version is None: + raise ValueError("`om_version` should be defined.") + + om_download_url = os.environ.get("om_download_url", "") + if om_download_url == "": + om_download_url = find_om_url(om_version) + + args = { + "version": om_version, + "om_download_url": om_download_url, + } + + build_image_generic( + image_name="mongodb-enterprise-ops-manager-ubi", + dockerfile_path="docker/mongodb-enterprise-ops-manager/Dockerfile", + build_configuration=build_configuration, + extra_args=args, + ) + + +def build_image_generic( + image_name: str, + dockerfile_path: str, + build_configuration: BuildConfiguration, + extra_args: dict | None = None, + multi_arch_args_list: list[dict] | None = None, + is_multi_arch: bool = False, +): + """ + Build one or more architecture-specific images, then (optionally) + push a manifest and sign the result. 
+ """ + + # 1) Defaults + registry = build_configuration.base_registry + args_list = multi_arch_args_list or [extra_args or {}] + version = args_list[0].get("version", "") + architectures = [args.get("architecture") for args in args_list] + + # 2) Build each arch + for base_args in args_list: + # merge in the registry without mutating caller’s dict + build_args = {**base_args, "quay_registry": registry} + logger.debug(f"Build args: {build_args}") + + for arch in architectures: + logger.debug(f"Building {image_name} for arch={arch}") + logger.debug(f"build image generic - registry={registry}") + pipeline_process_image( + image_name=image_name, + image_tag=version, + dockerfile_path=dockerfile_path, + dockerfile_args=build_args, + base_registry=registry, + platforms=arch, + sign=False, + with_sbom=False, + ) + + # 3) Multi-arch manifest + if is_multi_arch: + create_and_push_manifest(registry + "/" + image_name, version, architectures=architectures) + + # 4) Signing (only on real releases) + if build_configuration.sign: + sign_image(registry, version) + verify_signature(registry, version) + + +def build_init_appdb(build_configuration: BuildConfiguration): + release = load_release_file() + version = release["initAppDbVersion"] + base_url = "https://fastdl.mongodb.org/tools/db/" + mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) + args = {"version": build_configuration.version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} + build_image_generic( + image_name="mongodb-kubernetes-init-appdb", + dockerfile_path="docker/mongodb-kubernetes-init-appdb/Dockerfile", + build_configuration=build_configuration, + extra_args=args, + ) + + +# TODO: nam static: remove this once static containers becomes the default +def build_init_database(build_configuration: BuildConfiguration): + release = load_release_file() + version = release["initDatabaseVersion"] # comes from release.json + base_url = "https://fastdl.mongodb.org/tools/db/" + mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) + args = {"version": build_configuration.version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} + build_image_generic( + "mongodb-kubernetes-init-database", + "docker/mongodb-kubernetes-init-database/Dockerfile", + build_configuration=build_configuration, + extra_args=args, + ) + + +def build_community_image(build_configuration: BuildConfiguration, image_type: str): + """ + Builds image for community components (readiness probe, upgrade hook). 
+
+    Args:
+        build_configuration: The build configuration to use
+        image_type: Type of image to build ("readiness-probe" or "upgrade-hook")
+    """
+
+    if image_type == "readiness-probe":
+        image_name = "mongodb-kubernetes-readinessprobe"
+        dockerfile_path = "docker/mongodb-kubernetes-readinessprobe/Dockerfile"
+    elif image_type == "upgrade-hook":
+        image_name = "mongodb-kubernetes-operator-version-upgrade-post-start-hook"
+        dockerfile_path = "docker/mongodb-kubernetes-upgrade-hook/Dockerfile"
+    else:
+        raise ValueError(f"Unsupported image type: {image_type}")
+
+    version = build_configuration.version
+    golang_version = os.getenv("GOLANG_VERSION", "1.24")
+
+    # Use only amd64 if we should skip arm64 builds
+    if should_skip_arm64():
+        platforms = ["linux/amd64"]
+        logger.info("Skipping ARM64 builds for community image as this is running in EVG pipeline as a patch")
+    else:
+        platforms = build_configuration.platforms or ["linux/amd64", "linux/arm64"]
+
+    # Extract architectures from platforms for build args
+    architectures = [platform.split("/")[-1] for platform in platforms]
+    multi_arch_args_list = []
+
+    for arch in architectures:
+        arch_args = {
+            "version": version,
+            "GOLANG_VERSION": golang_version,
+            "architecture": arch,
+            "TARGETARCH": arch,
+        }
+        multi_arch_args_list.append(arch_args)
+
+    # Create a copy of build_configuration with overridden platforms
+    from copy import copy
+
+    build_config_copy = copy(build_configuration)
+    build_config_copy.platforms = platforms
+
+    build_image_generic(
+        image_name=image_name,
+        dockerfile_path=dockerfile_path,
+        build_configuration=build_config_copy,
+        multi_arch_args_list=multi_arch_args_list,
+        is_multi_arch=True,
+    )
+
+
+def build_readiness_probe_image(build_configuration: BuildConfiguration):
+    """
+    Builds image used for readiness probe.
+    """
+    build_community_image(build_configuration, "readiness-probe")
+
+
+def build_upgrade_hook_image(build_configuration: BuildConfiguration):
+    """
+    Builds image used for version upgrade post-start hook.
+    """
+    build_community_image(build_configuration, "upgrade-hook")
+
+
+def build_agent_pipeline(
+    build_configuration: BuildConfiguration,
+    image_version,
+    init_database_image,
+    mongodb_tools_url_ubi,
+    mongodb_agent_url_ubi: str,
+    agent_version,
+):
+    version = f"{agent_version}_{image_version}"
+
+    args = {
+        "version": version,
+        "agent_version": agent_version,
+        "ubi_suffix": "-ubi",
+        "release_version": image_version,
+        "init_database_image": init_database_image,
+        "mongodb_tools_url_ubi": mongodb_tools_url_ubi,
+        "mongodb_agent_url_ubi": mongodb_agent_url_ubi,
+        "quay_registry": build_configuration.base_registry,
+    }
+
+    build_image_generic(
+        image_name="mongodb-agent-ubi",
+        dockerfile_path="docker/mongodb-agent/Dockerfile",
+        build_configuration=build_configuration,
+        extra_args=args,
+    )
+
+
+def build_multi_arch_agent_in_sonar(
+    build_configuration: BuildConfiguration,
+    image_version,
+    tools_version,
+):
+    """
+    Creates the multi-arch, non-operator-suffixed version of the agent.
+    This is a drop-in replacement for the agent release from MCO.
+    This should only be called during releases, which will lead to a release
+    of the multi-arch images to quay and ecr.
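+    Example arguments (illustrative only): image_version="108.0.2.8729-1",
+    tools_version="100.12.0".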
+ """ + + logger.info(f"building multi-arch base image for: {image_version}") + args = { + "version": image_version, + "tools_version": tools_version, + } + + arch_arm = { + "agent_distro": "amzn2_aarch64", + "tools_distro": get_tools_distro(tools_version=tools_version)["arm"], + "architecture": "arm64", + } + arch_amd = { + "agent_distro": "rhel9_x86_64", + "tools_distro": get_tools_distro(tools_version=tools_version)["amd"], + "architecture": "amd64", + } + + new_rhel_tool_version = "100.10.0" + if Version(tools_version) >= Version(new_rhel_tool_version): + arch_arm["tools_distro"] = "rhel93-aarch64" + arch_amd["tools_distro"] = "rhel93-x86_64" + + joined_args = [args | arch_amd] + + # Only include arm64 if we shouldn't skip it + if not should_skip_arm64(): + joined_args.append(args | arch_arm) + + build_image_generic( + image_name="mongodb-agent-ubi", + dockerfile_path="docker/mongodb-agent-non-matrix/Dockerfile", + build_configuration=build_config_copy, + is_multi_arch=True, + multi_arch_args_list=joined_args, + ) + +# TODO: why versions are wrong -> 13.35.0.9498-1_13.35.0.9498-1_6874c19d2aab5d0007820c51 ; duplicate +# TODO: figure out why I hit toomanyrequests: Rate exceeded with the new pipeline +def build_agent_default_case(build_configuration: BuildConfiguration): + """ + Build the agent only for the latest operator for patches and operator releases. + + See more information in the function: build_agent_on_agent_bump + """ + release = load_release_file() + + # We need to release [all agents x latest operator] on operator releases + if build_configuration.scenario == BuildScenario.RELEASE: + agent_versions_to_build = gather_all_supported_agent_versions(release) + # We only need [latest agents (for each OM major version and for CM) x patch ID] for patches + else: + agent_versions_to_build = gather_latest_agent_versions(release) + + logger.info( + f"Building Agent versions: {agent_versions_to_build} for Operator versions: {build_configuration.version}" + ) + + tasks_queue = Queue() + max_workers = 1 + if build_configuration.parallel: + max_workers = None + if build_configuration.parallel_factor > 0: + max_workers = build_configuration.parallel_factor + with ProcessPoolExecutor(max_workers=max_workers) as executor: + logger.info(f"running with factor of {max_workers}") + for agent_version in agent_versions_to_build: + # We don't need to keep create and push the same image on every build. + # It is enough to create and push the non-operator suffixed images only during releases to ecr and quay. + # if build_configuration.is_release_step_executed() or build_configuration.all_agents: + # tasks_queue.put( + # executor.submit( + # build_multi_arch_agent_in_sonar, + # build_configuration, + # agent_version[0], + # agent_version[1], + # ) + # ) + _build_agent_operator( + agent_version, + build_configuration, + executor, + build_configuration.version, + tasks_queue, + build_configuration.scenario == BuildScenario.RELEASE, + ) + + queue_exception_handling(tasks_queue) + +# TODO: for now, release agents ECR release versions with image:version_version (duplicated) +def build_agent_on_agent_bump(build_configuration: BuildConfiguration): + """ + Build the agent matrix (operator version x agent version), triggered by PCT. + + We have three cases where we need to build the agent: + - e2e test runs + - operator releases + - OM/CM bumps via PCT + + We don’t require building a full matrix on e2e test runs and operator releases. 
+ "Operator releases" and "e2e test runs" require only the latest operator x agents + + In OM/CM bumps, we release a new agent which we potentially require to release to older operators as well. + This function takes care of that. + """ + release = load_release_file() + is_release = build_configuration.is_release_step_executed() + + if build_configuration.all_agents: + # We need to release [all agents x latest operator] on operator releases to make e2e tests work + # This was changed previously in https://github.com/mongodb/mongodb-kubernetes/pull/3960 + agent_versions_to_build = gather_all_supported_agent_versions(release) + else: + # we only need to release the latest images, we don't need to re-push old images, as we don't clean them up anymore. + agent_versions_to_build = gather_latest_agent_versions(release) + + legacy_agent_versions_to_build = release["supportedImages"]["mongodb-agent"]["versions"] + + tasks_queue = Queue() + max_workers = 1 + if build_configuration.parallel: + max_workers = None + if build_configuration.parallel_factor > 0: + max_workers = build_configuration.parallel_factor + with ProcessPoolExecutor(max_workers=max_workers) as executor: + logger.info(f"running with factor of {max_workers}") + + # We need to regularly push legacy agents, otherwise ecr lifecycle policy will expire them. + # We only need to push them once in a while to ecr, so no quay required + if not is_release: + for legacy_agent in legacy_agent_versions_to_build: + tasks_queue.put( + executor.submit( + build_multi_arch_agent_in_sonar, + build_configuration, + legacy_agent, + # we assume that all legacy agents are build using that tools version + "100.9.4", + ) + ) + + for agent_version in agent_versions_to_build: + # We don't need to keep create and push the same image on every build. + # It is enough to create and push the non-operator suffixed images only during releases to ecr and quay. + if build_configuration.is_release_step_executed() or build_configuration.all_agents: + tasks_queue.put( + executor.submit( + build_multi_arch_agent_in_sonar, + build_configuration, + agent_version[0], + agent_version[1], + ) + ) + for operator_version in get_supported_operator_versions(): + logger.info(f"Building Agent versions: {agent_version} for Operator versions: {operator_version}") + _build_agent_operator( + agent_version, build_configuration, executor, operator_version, tasks_queue, is_release + ) + + queue_exception_handling(tasks_queue) + + +def queue_exception_handling(tasks_queue): + exceptions_found = False + for task in tasks_queue.queue: + if task.exception() is not None: + exceptions_found = True + logger.fatal(f"The following exception has been found when building: {task.exception()}") + if exceptions_found: + raise Exception( + f"Exception(s) found when processing Agent images. 
\nSee also previous logs for more info\nFailing the build" + ) + + +def _build_agent_operator( + agent_version: Tuple[str, str], + build_configuration: BuildConfiguration, + executor: ProcessPoolExecutor, + operator_version: str, + tasks_queue: Queue, + use_quay: bool = False, +): + agent_distro = "rhel9_x86_64" + tools_version = agent_version[1] + tools_distro = get_tools_distro(tools_version)["amd"] + image_version = f"{agent_version[0]}_{operator_version}" + mongodb_tools_url_ubi = ( + f"https://downloads.mongodb.org/tools/db/mongodb-database-tools-{tools_distro}-{tools_version}.tgz" + ) + mongodb_agent_url_ubi = f"https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-{agent_version[0]}.{agent_distro}.tar.gz" + init_database_image = f"{build_configuration.base_registry}/mongodb-kubernetes-init-database:{operator_version}" + + tasks_queue.put( + executor.submit( + build_agent_pipeline, + build_configuration, + image_version, + init_database_image, + mongodb_tools_url_ubi, + mongodb_agent_url_ubi, + agent_version[0], + ) + ) + + +def gather_all_supported_agent_versions(release: Dict) -> List[Tuple[str, str]]: + # This is a list of a tuples - agent version and corresponding tools version + agent_versions_to_build = list() + agent_versions_to_build.append( + ( + release["supportedImages"]["mongodb-agent"]["opsManagerMapping"]["cloud_manager"], + release["supportedImages"]["mongodb-agent"]["opsManagerMapping"]["cloud_manager_tools"], + ) + ) + for _, om in release["supportedImages"]["mongodb-agent"]["opsManagerMapping"]["ops_manager"].items(): + agent_versions_to_build.append((om["agent_version"], om["tools_version"])) + + # lets not build the same image multiple times + return sorted(list(set(agent_versions_to_build))) + + +def gather_latest_agent_versions(release: Dict) -> List[Tuple[str, str]]: + """ + This function is used when we release a new agent via OM bump. + That means we will need to release that agent with all supported operators. 
+ Since we don’t want to release all agents again, we only release the latest, which will contain the newly added one + :return: the latest agent for each major version + """ + agent_versions_to_build = list() + agent_versions_to_build.append( + ( + release["supportedImages"]["mongodb-agent"]["opsManagerMapping"]["cloud_manager"], + release["supportedImages"]["mongodb-agent"]["opsManagerMapping"]["cloud_manager_tools"], + ) + ) + + latest_versions = {} + + for version in release["supportedImages"]["mongodb-agent"]["opsManagerMapping"]["ops_manager"].keys(): + parsed_version = semver.VersionInfo.parse(version) + major_version = parsed_version.major + if major_version in latest_versions: + latest_parsed_version = semver.VersionInfo.parse(str(latest_versions[major_version])) + latest_versions[major_version] = max(parsed_version, latest_parsed_version) + else: + latest_versions[major_version] = version + + for major_version, latest_version in latest_versions.items(): + agent_versions_to_build.append( + ( + release["supportedImages"]["mongodb-agent"]["opsManagerMapping"]["ops_manager"][str(latest_version)][ + "agent_version" + ], + release["supportedImages"]["mongodb-agent"]["opsManagerMapping"]["ops_manager"][str(latest_version)][ + "tools_version" + ], + ) + ) + + # TODO: Remove this once we don't need to use OM 7.0.12 in the OM Multicluster DR tests + # https://jira.mongodb.org/browse/CLOUDP-297377 + agent_versions_to_build.append(("107.0.12.8669-1", "100.10.0")) + + return sorted(list(set(agent_versions_to_build))) + + +def get_builder_function_for_image_name() -> Dict[str, Callable]: + """Returns a dictionary of image names that can be built.""" + + image_builders = { + "cli": build_CLI_SBOM, + "test": build_tests_image, + "operator": build_operator_image, + "mco-test": build_mco_tests_image, + # TODO: add support to build this per patch + "readiness-probe": build_readiness_probe_image, + "upgrade-hook": build_upgrade_hook_image, + "operator-quick": build_operator_image_patch, + "database": build_database_image, + "agent-pct": build_agent_on_agent_bump, + "agent": build_agent_default_case, + # + # Init images + "init-appdb": build_init_appdb, + "init-database": build_init_database, + "init-ops-manager": build_init_om_image, + # + # Ops Manager image + "ops-manager": build_om_image, + } + + return image_builders diff --git a/scripts/release/build_configuration.py b/scripts/release/build_configuration.py new file mode 100644 index 000000000..b62994d0e --- /dev/null +++ b/scripts/release/build_configuration.py @@ -0,0 +1,21 @@ +from dataclasses import dataclass +from typing import List, Optional + +from .build_context import BuildScenario + + +@dataclass +class BuildConfiguration: + scenario: BuildScenario + version: str + base_registry: str + + parallel: bool = False + parallel_factor: int = 0 + platforms: Optional[List[str]] = None + sign: bool = False + all_agents: bool = False + debug: bool = True + + def is_release_step_executed(self) -> bool: + return self.scenario == BuildScenario.RELEASE diff --git a/scripts/release/build_context.py b/scripts/release/build_context.py new file mode 100644 index 000000000..8723ec0a3 --- /dev/null +++ b/scripts/release/build_context.py @@ -0,0 +1,81 @@ +import os +from dataclasses import dataclass +from enum import Enum +from typing import Optional + +from lib.base_logger import logger + + +class BuildScenario(str, Enum): + """Represents the context in which the build is running.""" + + RELEASE = "release" # Official release build from a git tag + PATCH 
= "patch" # CI build for a patch/pull request + MASTER = "master" # CI build from a merge to the master + DEVELOPMENT = "development" # Local build on a developer machine + + @classmethod + def infer_scenario_from_environment(cls) -> "BuildScenario": + """Infer the build scenario from environment variables.""" + git_tag = os.getenv("triggered_by_git_tag") + is_patch = os.getenv("is_patch", "false").lower() == "true" + is_evg = os.getenv("RUNNING_IN_EVG", "false").lower() == "true" + patch_id = os.getenv("version_id") + + if git_tag: + scenario = BuildScenario.RELEASE + logger.info(f"Build scenario: {scenario} (git_tag: {git_tag})") + elif is_patch: + scenario = BuildScenario.PATCH + logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})") + elif is_evg: + scenario = BuildScenario.MASTER # TODO: ultimately we won't have RELEASE variant and master will push to staging + logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})") + else: + scenario = BuildScenario.DEVELOPMENT + logger.info(f"Build scenario: {scenario}") + + return scenario + + +@dataclass +class BuildContext: + """Define build parameters based on the build scenario.""" + + scenario: BuildScenario + git_tag: Optional[str] = None + patch_id: Optional[str] = None + signing_enabled: bool = False + multi_arch: bool = True + version: Optional[str] = None + + @classmethod + def from_scenario(cls, scenario: BuildScenario) -> "BuildContext": + """Create build context from a given scenario.""" + git_tag = os.getenv("triggered_by_git_tag") + patch_id = os.getenv("version_id") + signing_enabled = scenario == BuildScenario.RELEASE + + return cls( + scenario=scenario, + git_tag=git_tag, + patch_id=patch_id, + signing_enabled=signing_enabled, + version=git_tag or patch_id, + ) + + def get_version(self) -> str: + """Gets the version that will be used to tag the images.""" + if self.scenario == BuildScenario.RELEASE: + return self.git_tag + if self.patch_id: + return self.patch_id + return "latest" + + def get_base_registry(self) -> str: + """Get the base registry URL for the current scenario.""" + if self.scenario == BuildScenario.RELEASE: + return os.environ.get("STAGING_REPO_URL") + else: + return os.environ.get("BASE_REPO_URL") + diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py new file mode 100644 index 000000000..50175d8e0 --- /dev/null +++ b/scripts/release/build_images.py @@ -0,0 +1,173 @@ +# This file is the new Sonar +import base64 +import sys +from typing import Dict + +import python_on_whales +from python_on_whales.exceptions import DockerException +import time + +import boto3 +from botocore.exceptions import BotoCoreError, ClientError + +import docker +from lib.base_logger import logger +from lib.sonar.sonar import create_ecr_repository +from scripts.evergreen.release.images_signing import sign_image, verify_signature + +# TODO: self review the PR +def ecr_login_boto3(region: str, account_id: str): + """ + Fetches an auth token from ECR via boto3 and logs + into the Docker daemon via the Docker SDK. 
+ """ + registry = f"{account_id}.dkr.ecr.{region}.amazonaws.com" + # 1) get token + ecr = boto3.client("ecr", region_name=region) + try: + resp = ecr.get_authorization_token(registryIds=[account_id]) + except (BotoCoreError, ClientError) as e: + raise RuntimeError(f"Failed to fetch ECR token: {e}") + + auth_data = resp["authorizationData"][0] + token = auth_data["authorizationToken"] # base64 of "AWS:password" + username, password = base64.b64decode(token).decode().split(":", 1) + + # 2) docker login + client = docker.APIClient() # low-level client supports login() + login_resp = client.login(username=username, password=password, registry=registry, reauth=True) + # login_resp is a dict like {'Status': 'Login Succeeded'} + status = login_resp.get("Status", "") + if "Succeeded" not in status: + raise RuntimeError(f"Docker login failed: {login_resp}") + logger.debug(f"ECR login succeeded: {status}") + + +# TODO: don't do it every time ? Check for existence without relying on Exception +def ensure_buildx_builder(builder_name: str = "multiarch") -> str: + """ + Ensures a Docker Buildx builder exists for multi-platform builds. + + :param builder_name: Name for the buildx builder + :return: The builder name that was created or reused + """ + docker = python_on_whales.docker + + try: + docker.buildx.create( + name=builder_name, + driver="docker-container", + use=True, + bootstrap=True, + ) + logger.info(f"Created new buildx builder: {builder_name}") + except DockerException as e: + if f'existing instance for "{builder_name}"' in str(e): + logger.info(f"Builder '{builder_name}' already exists – reusing it.") + # Make sure it's the current one: + docker.buildx.use(builder_name) + else: + # Some other failure happened + logger.error(f"Failed to create buildx builder: {e}") + raise + + return builder_name + + +def build_image(tag: str, dockerfile: str, path: str, args: Dict[str, str] = {}, push: bool = True, platforms: list[str] = None): + """ + Build a Docker image using python_on_whales and Docker Buildx for multi-architecture support. 
+ + :param tag: Image tag (name:tag) + :param dockerfile: Name or relative path of the Dockerfile within `path` + :param path: Build context path (directory with your Dockerfile) + :param args: Build arguments dictionary + :param push: Whether to push the image after building + :param platforms: List of target platforms (e.g., ["linux/amd64", "linux/arm64"]) + """ + docker = python_on_whales.docker + + try: + # Convert build args to the format expected by python_on_whales + build_args = {k: str(v) for k, v in args.items()} if args else {} + + # Set default platforms if not specified + if platforms is None: + platforms = ["linux/amd64"] + + logger.info(f"Building image: {tag}") + logger.info(f"Platforms: {platforms}") + logger.info(f"Dockerfile: {dockerfile}") + logger.info(f"Build context: {path}") + logger.debug(f"Build args: {build_args}") + + # Use buildx for multi-platform builds + if len(platforms) > 1: + logger.info(f"Multi-platform build for {len(platforms)} architectures") + + # We need a special driver to handle multi platform builds + builder_name = ensure_buildx_builder("multiarch") + + # Build the image using buildx + docker.buildx.build( + context_path=path, + file=dockerfile, + tags=[tag], + platforms=platforms, + builder=builder_name, + build_args=build_args, + push=push, + pull=False, # Don't always pull base images + ) + + logger.info(f"Successfully built {'and pushed' if push else ''} {tag}") + + except Exception as e: + logger.error(f"Failed to build image {tag}: {e}") + raise RuntimeError(f"Failed to build image {tag}: {str(e)}") + + + +def process_image( + image_name: str, + image_tag: str, + dockerfile_path: str, + dockerfile_args: Dict[str, str], + base_registry: str, + platforms: list[str] = None, + sign: bool = False, + build_path: str = ".", + push: bool = True, +): + # Login to ECR using boto3 + ecr_login_boto3(region="us-east-1", account_id="268558157000") # TODO: use environment variables + + # Helper to automatically create registry with correct name + should_create_repo = False + if should_create_repo: + repo_to_create = "julienben/staging-temp/" + image_name + logger.debug(f"repo_to_create: {repo_to_create}") + create_ecr_repository(repo_to_create) + logger.info(f"Created repository {repo_to_create}") + + # Set default platforms if none provided TODO: remove from here and do it at higher level later + if platforms is None: + platforms = ["linux/amd64"] + + docker_registry = f"{base_registry}/{image_name}" + image_full_uri = f"{docker_registry}:{image_tag}" + + # Build image with docker buildx + build_image( + tag=image_full_uri, + dockerfile=dockerfile_path, + path=build_path, + args=dockerfile_args, + push=push, + platforms=platforms + ) + + if sign: + logger.info("Signing image") + sign_image(docker_registry, image_tag) + verify_signature(docker_registry, image_tag) diff --git a/scripts/release/main.py b/scripts/release/main.py new file mode 100644 index 000000000..c3155b044 --- /dev/null +++ b/scripts/release/main.py @@ -0,0 +1,203 @@ +import argparse +import os +import sys +import time +from typing import Callable, Dict, Iterable, List, Optional + +from opentelemetry import context, trace +from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( + OTLPSpanExporter as OTLPSpanGrpcExporter, +) +from opentelemetry.sdk.resources import SERVICE_NAME, Resource +from opentelemetry.sdk.trace import ( + SynchronousMultiSpanProcessor, + TracerProvider, +) +from opentelemetry.sdk.trace.export import BatchSpanProcessor +from opentelemetry.trace import 
NonRecordingSpan, SpanContext, TraceFlags + +from lib.base_logger import logger +from scripts.evergreen.release.images_signing import mongodb_artifactory_login +from scripts.release.atomic_pipeline import ( + build_agent_default_case, + build_agent_on_agent_bump, + build_CLI_SBOM, + build_database_image, + build_init_appdb, + build_init_database, + build_init_om_image, + build_mco_tests_image, + build_om_image, + build_operator_image, + build_operator_image_patch, + build_readiness_probe_image, + build_tests_image, + build_upgrade_hook_image, +) +from scripts.release.build_configuration import BuildConfiguration +from scripts.release.build_context import ( + BuildContext, + BuildScenario, +) + +""" +The goal of main.py, build_configuration.py and build_context.py is to provide a single source of truth for the build +configuration. All parameters that depend on the the build environment (local dev, evg, etc) should be resolved here and +not in the pipeline. +""" + + +def get_builder_function_for_image_name() -> Dict[str, Callable]: + """Returns a dictionary of image names that can be built.""" + + image_builders = { + "cli": build_CLI_SBOM, + "test": build_tests_image, + "operator": build_operator_image, + "mco-test": build_mco_tests_image, + # TODO: add support to build this per patch + "readiness-probe": build_readiness_probe_image, + "upgrade-hook": build_upgrade_hook_image, + "operator-quick": build_operator_image_patch, + "database": build_database_image, + "agent-pct": build_agent_on_agent_bump, + "agent": build_agent_default_case, + # + # Init images + "init-appdb": build_init_appdb, + "init-database": build_init_database, + "init-ops-manager": build_init_om_image, + # + # Ops Manager image + "ops-manager": build_om_image, + } + + return image_builders + + +def build_image(image_name: str, build_configuration: BuildConfiguration): + """Builds one of the supported images by its name.""" + get_builder_function_for_image_name()[image_name](build_configuration) + + +def _setup_tracing(): + trace_id = os.environ.get("otel_trace_id") + parent_id = os.environ.get("otel_parent_id") + endpoint = os.environ.get("otel_collector_endpoint") + if any(value is None for value in [trace_id, parent_id, endpoint]): + logger.info("tracing environment variables are missing, not configuring tracing") + return + logger.info(f"parent_id is {parent_id}") + logger.info(f"trace_id is {trace_id}") + logger.info(f"endpoint is {endpoint}") + span_context = SpanContext( + trace_id=int(trace_id, 16), + span_id=int(parent_id, 16), + is_remote=False, + # Magic number needed for our OTEL collector + trace_flags=TraceFlags(0x01), + ) + ctx = trace.set_span_in_context(NonRecordingSpan(span_context)) + context.attach(ctx) + sp = SynchronousMultiSpanProcessor() + span_processor = BatchSpanProcessor( + OTLPSpanGrpcExporter( + endpoint=endpoint, + ) + ) + sp.add_span_processor(span_processor) + resource = Resource(attributes={SERVICE_NAME: "evergreen-agent"}) + provider = TracerProvider(resource=resource, active_span_processor=sp) + trace.set_tracer_provider(provider) + + +def main(): + + _setup_tracing() + parser = argparse.ArgumentParser(description="Build container images.") + parser.add_argument("image", help="Image to build.") # Required + parser.add_argument("--parallel", action="store_true", help="Build images in parallel.") + parser.add_argument("--debug", action="store_true", help="Enable debug logging.") + parser.add_argument("--sign", action="store_true", help="Sign images.") + parser.add_argument( + 
"--scenario", + choices=list(BuildScenario), + help=f"Override the build scenario instead of inferring from environment. Options: release, patch, master, development", + ) + # Override arguments for build context and configuration + parser.add_argument( + "--platform", + default="linux/amd64", + help="Target platforms for multi-arch builds (comma-separated). Example: linux/amd64,linux/arm64. Defaults to linux/amd64.", + ) + parser.add_argument( + "--version", + help="Override the version/tag instead of resolving from build scenario", + ) + parser.add_argument( + "--registry", + help="Override the base registry instead of resolving from build scenario", + ) + + # Agent specific arguments + parser.add_argument( + "--all-agents", + action="store_true", + help="Build all agent variants instead of only the latest.", + ) + parser.add_argument( + "--parallel-factor", + default=0, + type=int, + help="Number of builds to run in parallel, defaults to number of cores", + ) + + args = parser.parse_args() + + build_config = build_config_from_args(args) + logger.info(f"Building image: {args.image}") + logger.info(f"Build configuration: {build_config}") + + build_image(args.image, build_config) + + +def build_config_from_args(args): + # Validate that the image name is supported + supported_images = get_builder_function_for_image_name().keys() + if args.image not in supported_images: + logger.error(f"Unsupported image '{args.image}'. Supported images: {', '.join(supported_images)}") + sys.exit(1) + + # Parse platform argument (comma-separated) + platforms = [p.strip() for p in args.platform.split(",")] + SUPPORTED_PLATFORMS = ["linux/amd64", "linux/arm64"] + if any(p not in SUPPORTED_PLATFORMS for p in platforms): + logger.error(f"Unsupported platform in '{args.platform}'. Supported platforms: {', '.join(SUPPORTED_PLATFORMS)}") + sys.exit(1) + + # Centralized configuration management with overrides + build_scenario = args.scenario or BuildScenario.infer_scenario_from_environment() + build_context = BuildContext.from_scenario(build_scenario) + + # Resolve final values with overrides + scenario = args.scenario or build_context.scenario + version = args.version or build_context.get_version() + registry = args.registry or build_context.get_base_registry() + sign = args.sign or build_context.signing_enabled + all_agents = args.all_agents or bool(os.environ.get("all_agents", False)) + + return BuildConfiguration( + scenario=scenario, + version=version, + base_registry=registry, + parallel=args.parallel, + debug=args.debug, # TODO: is debug used ? 
+        platforms=platforms,
+        sign=sign,
+        all_agents=all_agents,
+        parallel_factor=args.parallel_factor,
+    )
+
+
+if __name__ == "__main__":
+    main()
diff --git a/scripts/release/optimized_operator_build.py b/scripts/release/optimized_operator_build.py
new file mode 100644
index 000000000..c59e3c003
--- /dev/null
+++ b/scripts/release/optimized_operator_build.py
@@ -0,0 +1,87 @@
+import os
+import subprocess
+import tarfile
+from datetime import datetime, timedelta, timezone
+
+import docker
+from lib.base_logger import logger
+from scripts.release.build_configuration import BuildConfiguration
+
+
+def copy_into_container(client, src, dst):
+    """Copies a local file into a running container."""
+
+    os.chdir(os.path.dirname(src))
+    srcname = os.path.basename(src)
+    with tarfile.open(src + ".tar", mode="w") as tar:
+        tar.add(srcname)
+
+    name, dst = dst.split(":")
+    container = client.containers.get(name)
+
+    with open(src + ".tar", "rb") as fd:
+        container.put_archive(os.path.dirname(dst), fd.read())
+
+
+def build_operator_image_fast(build_configuration: BuildConfiguration) -> bool:
+    """Builds the operator locally and pushes it into an existing Docker
+    image. This is the fastest way we could imagine to do this."""
+
+    client = docker.from_env()
+    # image that we know is the one where we build the operator.
+    image_repo = build_configuration.base_registry + "/" + build_configuration.image_type + "/mongodb-kubernetes"
+    image_tag = "latest"
+    repo_tag = image_repo + ":" + image_tag
+
+    logger.debug(f"Pulling image: {repo_tag}")
+    try:
+        image = client.images.get(repo_tag)
+    except docker.errors.ImageNotFound:
+        logger.debug("Operator image does not exist locally. Building it now")
+        return False
+
+    logger.debug("Done")
+    too_old = datetime.now() - timedelta(hours=3)
+    image_timestamp = datetime.fromtimestamp(
+        image.history()[0]["Created"]
+    )  # Layer 0 is the latest added layer to this Docker image. [-1] is the FROM layer.
+ + if image_timestamp < too_old: + logger.info("Current operator image is too old, will rebuild it completely first") + return False + + container_name = "mongodb-enterprise-operator" + operator_binary_location = "/usr/local/bin/mongodb-kubernetes-operator" + try: + client.containers.get(container_name).remove() + logger.debug(f"Removed {container_name}") + except docker.errors.NotFound: + pass + + container = client.containers.run(repo_tag, name=container_name, entrypoint="sh", detach=True) + + logger.debug("Building operator with debugging symbols") + subprocess.run(["make", "manager"], check=True, stdout=subprocess.PIPE) + logger.debug("Done building the operator") + + copy_into_container( + client, + os.getcwd() + "/docker/mongodb-kubernetes-operator/content/mongodb-kubernetes-operator", + container_name + ":" + operator_binary_location, + ) + + # Commit changes on disk as a tag + container.commit( + repository=image_repo, + tag=image_tag, + ) + # Stop this container so we can use it next time + container.stop() + container.remove() + + logger.info("Pushing operator to {}:{}".format(image_repo, image_tag)) + client.images.push( + repository=image_repo, + tag=image_tag, + ) + return True From 675bee46ab1eb3891563e068890e6733cee47e66 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Tue, 29 Jul 2025 17:37:24 +0200 Subject: [PATCH 011/164] Remove file --- docker/mongodb-kubernetes-tests/release.json | 253 ------------------- 1 file changed, 253 deletions(-) delete mode 100644 docker/mongodb-kubernetes-tests/release.json diff --git a/docker/mongodb-kubernetes-tests/release.json b/docker/mongodb-kubernetes-tests/release.json deleted file mode 100644 index 4fdb45ec1..000000000 --- a/docker/mongodb-kubernetes-tests/release.json +++ /dev/null @@ -1,253 +0,0 @@ -{ - "mongodbToolsBundle": { - "ubi": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz" - }, - "mongodbOperator": "1.1.0", - "initDatabaseVersion": "1.1.0", - "initOpsManagerVersion": "1.1.0", - "initAppDbVersion": "1.1.0", - "databaseImageVersion": "1.1.0", - "agentVersion": "108.0.2.8729-1", - "openshift": { - "minimumSupportedVersion": "4.6" - }, - "search": { - "community": { - "version": "1.47.0" - } - }, - "supportedImages": { - "readinessprobe": { - "ssdlc_name": "MongoDB Controllers for Kubernetes Readiness Probe", - "versions": [ - "1.0.22" - ], - "variants": [ - "ubi" - ] - }, - "operator-version-upgrade-post-start-hook": { - "ssdlc_name": "MongoDB Controllers for Kubernetes Operator Version Upgrade Hook", - "versions": [ - "1.0.9" - ], - "variants": [ - "ubi" - ] - }, - "ops-manager": { - "ssdlc_name": "MongoDB Controllers for Kubernetes Enterprise Ops Manager", - "versions": [ - "6.0.25", - "6.0.26", - "6.0.27", - "7.0.12", - "7.0.13", - "7.0.14", - "7.0.15", - "8.0.5", - "8.0.6", - "8.0.7" - ], - "variants": [ - "ubi" - ] - }, - "mongodb-kubernetes": { - "Description": "We support 3 last versions, see https://wiki.corp.mongodb.com/display/MMS/Kubernetes+Operator+Support+Policy", - "ssdlc_name": "MongoDB Controllers for Kubernetes Operator", - "versions": [ - "1.0.0", - "1.0.1", - "1.1.0" - ], - "variants": [ - "ubi" - ] - }, - "mongodb-kubernetes-operator": { - "Description": "Community Operator daily rebuilds", - "ssdlc_name": "MongoDB Community Operator", - "versions": [ - "0.12.0", - "0.11.0", - "0.10.0", - "0.9.0", - "0.8.3", - "0.8.2", - "0.8.1", - "0.8.0", - "0.7.9", - "0.7.8", - "0.7.7", - "0.7.6" - ], - "variants": [ - "ubi" - ] - }, - "mongodb-agent": { - "Description": "Agents corresponding to OpsManager 5.x 
and 6.x series", - "ssdlc_name": "MongoDB Controllers for Kubernetes MongoDB Agent", - "Description for specific versions": { - "11.0.5.6963-1": "An upgraded version for OM 5.0 we use for Operator-only deployments", - "12.0.28.7763-1": "OM 6 basic version" - }, - "versions": [ - "108.0.2.8729-1" - ], - "opsManagerMapping": { - "Description": "These are the agents from which we start supporting static containers.", - "cloud_manager": "13.35.0.9498-1", - "cloud_manager_tools": "100.12.1", - "ops_manager": { - "6.0.25": { - "agent_version": "12.0.33.7866-1", - "tools_version": "100.10.0" - }, - "6.0.26": { - "agent_version": "12.0.34.7888-1", - "tools_version": "100.10.0" - }, - "6.0.27": { - "agent_version": "12.0.35.7911-1", - "tools_version": "100.10.0" - }, - "7.0.13": { - "agent_version": "107.0.13.8702-1", - "tools_version": "100.10.0" - }, - "7.0.14": { - "agent_version": "107.0.13.8702-1", - "tools_version": "100.10.0" - }, - "7.0.15": { - "agent_version": "107.0.15.8741-1", - "tools_version": "100.11.0" - }, - "8.0.5": { - "agent_version": "108.0.4.8770-1", - "tools_version": "100.11.0" - }, - "8.0.6": { - "agent_version": "108.0.6.8796-1", - "tools_version": "100.11.0" - }, - "8.0.7": { - "agent_version": "108.0.7.8810-1", - "tools_version": "100.12.0" - } - } - }, - "variants": [ - "ubi" - ] - }, - "init-ops-manager": { - "Description": "The lowest version corresponds to the lowest supported Operator version, see https://wiki.corp.mongodb.com/display/MMS/Kubernetes+Operator+Support+Policy", - "ssdlc_name": "MongoDB Controllers for Kubernetes Init Ops Manager", - "versions": [ - "1.0.0", - "1.0.1", - "1.1.0" - ], - "variants": [ - "ubi" - ] - }, - "init-database": { - "Description": "The lowest version corresponds to the lowest supported Operator version, see https://wiki.corp.mongodb.com/display/MMS/Kubernetes+Operator+Support+Policy", - "ssdlc_name": "MongoDB Controllers for Kubernetes Init Database", - "versions": [ - "1.0.0", - "1.0.1", - "1.1.0" - ], - "variants": [ - "ubi" - ] - }, - "init-appdb": { - "Description": "The lowest version corresponds to the lowest supported Operator version, see https://wiki.corp.mongodb.com/display/MMS/Kubernetes+Operator+Support+Policy", - "ssdlc_name": "MongoDB Controllers for Kubernetes Init AppDB", - "versions": [ - "1.0.0", - "1.0.1", - "1.1.0" - ], - "variants": [ - "ubi" - ] - }, - "database": { - "Description": "The lowest version corresponds to the lowest supported Operator version, see https://wiki.corp.mongodb.com/display/MMS/Kubernetes+Operator+Support+Policy", - "ssdlc_name": "MongoDB Controllers for Kubernetes Database", - "versions": [ - "1.0.0", - "1.0.1", - "1.1.0" - ], - "variants": [ - "ubi" - ] - }, - "mongodb-enterprise-server": { - "Description": "The lowest version corresponds to the lowest supported Operator version, see https://wiki.corp.mongodb.com/display/MMS/Kubernetes+Operator+Support+Policy", - "ssdlc_name": "MongoDB Enterprise Server", - "versions": [ - "4.4.0-ubi8", - "4.4.1-ubi8", - "4.4.2-ubi8", - "4.4.3-ubi8", - "4.4.4-ubi8", - "4.4.5-ubi8", - "4.4.6-ubi8", - "4.4.7-ubi8", - "4.4.8-ubi8", - "4.4.9-ubi8", - "4.4.10-ubi8", - "4.4.11-ubi8", - "4.4.12-ubi8", - "4.4.13-ubi8", - "4.4.14-ubi8", - "4.4.15-ubi8", - "4.4.16-ubi8", - "4.4.17-ubi8", - "4.4.18-ubi8", - "4.4.19-ubi8", - "4.4.20-ubi8", - "4.4.21-ubi8", - "5.0.0-ubi8", - "5.0.1-ubi8", - "5.0.2-ubi8", - "5.0.3-ubi8", - "5.0.4-ubi8", - "5.0.5-ubi8", - "5.0.6-ubi8", - "5.0.7-ubi8", - "5.0.8-ubi8", - "5.0.9-ubi8", - "5.0.10-ubi8", - "5.0.11-ubi8", - "5.0.12-ubi8", 
- "5.0.13-ubi8", - "5.0.14-ubi8", - "5.0.15-ubi8", - "5.0.16-ubi8", - "5.0.17-ubi8", - "5.0.18-ubi8", - "6.0.0-ubi8", - "6.0.1-ubi8", - "6.0.2-ubi8", - "6.0.3-ubi8", - "6.0.4-ubi8", - "6.0.5-ubi8", - "8.0.0-ubi8", - "8.0.0-ubi9" - ], - "variants": [ - "ubi" - ] - } - } -} From 833e25f2746f5e5252c3d4e53e6842df47ca633a Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Tue, 29 Jul 2025 17:37:49 +0200 Subject: [PATCH 012/164] Put lib back in dependencies --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index c3ce86737..9461810cd 100644 --- a/requirements.txt +++ b/requirements.txt @@ -34,6 +34,7 @@ wrapt==1.17.2 botocore==1.39.4 boto3==1.39.4 python-frontmatter==1.1.0 +python-on-whales # from kubeobject freezegun==1.5.3 From 15e7f51201514c01cde646cfd94697e49cf4f2c0 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Tue, 29 Jul 2025 17:40:02 +0200 Subject: [PATCH 013/164] add todo --- scripts/release/build_context.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/release/build_context.py b/scripts/release/build_context.py index 8723ec0a3..f163c3818 100644 --- a/scripts/release/build_context.py +++ b/scripts/release/build_context.py @@ -61,7 +61,7 @@ def from_scenario(cls, scenario: BuildScenario) -> "BuildContext": git_tag=git_tag, patch_id=patch_id, signing_enabled=signing_enabled, - version=git_tag or patch_id, + version=git_tag or patch_id, #TODO: update this ) def get_version(self) -> str: From 120c1af4da25634f7c195b6745b9b2fbde2c686a Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Tue, 29 Jul 2025 17:43:17 +0200 Subject: [PATCH 014/164] Fix --- scripts/release/atomic_pipeline.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 915f7f086..856605868 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -403,12 +403,9 @@ def build_image_generic( logger.debug(f"build image generic - registry={registry}") pipeline_process_image( image_name=image_name, - image_tag=version, dockerfile_path=dockerfile_path, + build_configuration=build_configuration, dockerfile_args=build_args, - base_registry=registry, - platforms=arch, - sign=False, with_sbom=False, ) From c9ceabf14907ad98f6d2057e193b491826f2dde9 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Tue, 29 Jul 2025 18:47:59 +0200 Subject: [PATCH 015/164] Remove multi arch call, fix test image path --- scripts/release/atomic_pipeline.py | 8 ++++---- scripts/release/build_images.py | 1 + 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 856605868..3653e7b27 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -181,7 +181,7 @@ def build_tests_image(build_configuration: BuildConfiguration): pipeline_process_image( image_name, - dockerfile_path="Dockerfile", + dockerfile_path="docker/mongodb-kubernetes-tests/Dockerfile", build_configuration=build_configuration, dockerfile_args=buildargs, build_path="docker/mongodb-kubernetes-tests", @@ -409,9 +409,9 @@ def build_image_generic( with_sbom=False, ) - # 3) Multi-arch manifest - if is_multi_arch: - create_and_push_manifest(registry + "/" + image_name, version, architectures=architectures) + # # 3) Multi-arch manifest + # if is_multi_arch: + # create_and_push_manifest(registry + "/" + image_name, version, architectures=architectures) # 4) Signing (only on 
real releases) if build_configuration.sign: diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py index 50175d8e0..66e6b0d3a 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build_images.py @@ -117,6 +117,7 @@ def build_image(tag: str, dockerfile: str, path: str, args: Dict[str, str] = {}, builder=builder_name, build_args=build_args, push=push, + provenance=False, # To not get an untagged image for single platform builds pull=False, # Don't always pull base images ) From 46f0998d2335fad9311734cbede5b14e6864415d Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Wed, 30 Jul 2025 13:52:17 +0200 Subject: [PATCH 016/164] make jq multiarch and all other scriupts as well --- .evergreen-functions.yml | 15 +-- .evergreen.yml | 1 - scripts/dev/recreate_python_venv.sh | 87 ++++++++++++++-- scripts/evergreen/setup_aws.sh | 125 +++++++++++++++++------ scripts/evergreen/setup_jq.sh | 4 +- scripts/evergreen/setup_minikube_host.sh | 66 ++++++++++++ scripts/minikube/minikube_host.sh | 12 +-- 7 files changed, 254 insertions(+), 56 deletions(-) create mode 100755 scripts/evergreen/setup_minikube_host.sh diff --git a/.evergreen-functions.yml b/.evergreen-functions.yml index 3cd67d524..71837ab97 100644 --- a/.evergreen-functions.yml +++ b/.evergreen-functions.yml @@ -243,7 +243,7 @@ functions: working_dir: src/github.com/mongodb/mongodb-kubernetes add_to_path: - ${workdir}/bin - binary: scripts/dev/setup_minikube_host.sh + binary: scripts/minikube/setup_minikube_host.sh lint_repo: - command: subprocess.exec @@ -271,13 +271,16 @@ functions: - *python_venv # This differs for normal evg_host as we require minikube instead of kind for - # IBM machines + # IBM machines and install aws cli via pip instead setup_building_host_minikube: - *switch_context - - *setup_aws - - *configure_docker_auth - - *setup_ibm_host - - *python_venv + - command: subprocess.exec + type: setup + params: + working_dir: src/github.com/mongodb/mongodb-kubernetes + add_to_path: + - ${workdir}/bin + command: scripts/evergreen/setup_minikube_host.sh prune_docker_resources: - command: subprocess.exec diff --git a/.evergreen.yml b/.evergreen.yml index 2d2e113ae..31655847c 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -81,7 +81,6 @@ variables: setup_group_can_fail_task: true setup_group: - func: clone - - func: download_kube_tools - func: setup_building_host_minikube - &setup_group_multi_cluster diff --git a/scripts/dev/recreate_python_venv.sh b/scripts/dev/recreate_python_venv.sh index fb1f9ab8f..a5388e40f 100755 --- a/scripts/dev/recreate_python_venv.sh +++ b/scripts/dev/recreate_python_venv.sh @@ -4,26 +4,101 @@ set -Eeou pipefail +# Parse command line arguments +INSTALL_REQUIREMENTS=true + +while [[ $# -gt 0 ]]; do + case $1 in + --skip-requirements) + INSTALL_REQUIREMENTS=false + shift + ;; + -h|--help) + echo "Usage: $0 [--skip-requirements]" + echo " --skip-requirements Skip installing requirements.txt" + echo " -h, --help Show this help message" + exit 0 + ;; + *) + echo "Unknown option: $1" + echo "Use -h or --help for usage information" + exit 1 + ;; + esac +done + source scripts/dev/set_env_context.sh +# Ensure Python 3.10 is available, install if needed +ensure_python310() { + echo "Checking current Python version..." 
>&2 + + # Check if current python is 3.10 + if command -v python3 &> /dev/null; then + local version + if version=$(python3 --version 2>&1) && [[ "${version}" == *"Python 3.10"* ]]; then + echo "Found Python 3.10: ${version}" >&2 + echo "python3" + return 0 + else + echo "Current python3 version: ${version}" >&2 + fi + fi + + # Try to install Python 3.10 using pyenv if available + if command -v pyenv &> /dev/null; then + echo "Python 3.10 not found. Attempting to install via pyenv..." >&2 + + # Check if any 3.10 version is already installed + if pyenv versions --bare | grep -q "^3\.10\."; then + local installed_version + installed_version=$(pyenv versions --bare | grep "^3\.10\." | head -1) + echo "Found existing pyenv Python 3.10: ${installed_version}" >&2 + pyenv global "${installed_version}" + echo "python3" + return 0 + fi + + # Install latest Python 3.10 + local latest_310 + latest_310=$(pyenv install --list | grep -E "^[[:space:]]*3\.10\.[0-9]+$" | tail -1 | xargs) + if [[ -n "${latest_310}" ]]; then + echo "Installing Python ${latest_310} via pyenv..." >&2 + if pyenv install "${latest_310}"; then + pyenv global "${latest_310}" + echo "python3" + return 0 + fi + fi + fi + + echo "Error: No suitable Python 3.10 installation found and unable to install via pyenv." >&2 + echo "Please ensure Python 3.10 is installed or pyenv is available." >&2 + return 1 +} + if [[ -d "${PROJECT_DIR}"/venv ]]; then echo "Removing venv..." cd "${PROJECT_DIR}" rm -rf "venv" fi -# in our EVG hosts, python versions are always in /opt/python -python_bin="/opt/python/${PYTHON_VERSION}/bin/python3" -if [[ "$(uname)" == "Darwin" ]]; then - python_bin="python${PYTHON_VERSION}" -fi +# Ensure Python 3.10 is available +python_bin=$(ensure_python310) echo "Using python from the following path: ${python_bin}" "${python_bin}" -m venv venv source venv/bin/activate pip install --upgrade pip -pip install -r requirements.txt + +if [[ "${INSTALL_REQUIREMENTS}" == "true" ]]; then + echo "Installing requirements.txt..." + pip install -r requirements.txt +else + echo "Skipping requirements.txt installation (--skip-requirements flag used)" +fi + echo "Python venv was recreated successfully." echo "Current python path: $(which python)" python --version diff --git a/scripts/evergreen/setup_aws.sh b/scripts/evergreen/setup_aws.sh index 072900639..5563a8a30 100755 --- a/scripts/evergreen/setup_aws.sh +++ b/scripts/evergreen/setup_aws.sh @@ -3,51 +3,116 @@ set -Eeou pipefail source scripts/dev/set_env_context.sh -# Detect system architecture and map to AWS CLI architecture names -detect_aws_architecture() { +# Detect system architecture +detect_architecture() { local arch arch=$(uname -m) + echo "Detected architecture: ${arch}" >&2 + echo "${arch}" +} + +# Install AWS CLI v2 via binary download (for x86_64 and aarch64) +install_aws_cli_binary() { + local arch="$1" + echo "Installing AWS CLI v2 via binary download for ${arch}..." + # Map architecture names for AWS CLI download URLs + local aws_arch case "${arch}" in x86_64) - echo "x86_64" + aws_arch="x86_64" ;; aarch64|arm64) - echo "aarch64" - ;; - ppc64le) - echo "Skipping AWS CLI installation: ppc64le (IBM Power) architecture is not supported by AWS CLI v2." >&2 - echo "AWS CLI v2 only supports: x86_64 (amd64), aarch64 (arm64)" >&2 - exit 0 - ;; - s390x) - echo "Skipping AWS CLI installation: s390x (IBM Z) architecture is not supported by AWS CLI v2." 
>&2 - echo "AWS CLI v2 only supports: x86_64 (amd64), aarch64 (arm64)" >&2 - exit 0 + aws_arch="aarch64" ;; *) - echo "Skipping AWS CLI installation: Unsupported architecture: ${arch}" >&2 - echo "AWS CLI v2 only supports: x86_64 (amd64), aarch64 (arm64)" >&2 - exit 0 + echo "Error: Unsupported architecture for binary installation: ${arch}" >&2 + return 1 ;; esac + + # Download and install AWS CLI v2 + local temp_dir + temp_dir=$(mktemp -d) + cd "${temp_dir}" + + echo "Downloading AWS CLI v2 for ${aws_arch}..." + curl -s "https://awscli.amazonaws.com/awscli-exe-linux-${aws_arch}.zip" -o "awscliv2.zip" + + unzip -q awscliv2.zip + sudo ./aws/install --update + + # Clean up + cd - > /dev/null + rm -rf "${temp_dir}" + + # Verify installation + if command -v aws &> /dev/null; then + echo "AWS CLI v2 installed successfully:" + aws --version + else + echo "Error: AWS CLI v2 installation failed" >&2 + return 1 + fi } -# Detect the current architecture -ARCH=$(detect_aws_architecture) -echo "Detected architecture: ${ARCH} (AWS CLI v2 supported)" +# Install AWS CLI v1 via pip (for IBM architectures: ppc64le, s390x) +install_aws_cli_pip() { + echo "Installing AWS CLI v1 via pip (for IBM architectures)..." + + # Ensure pip is available + if ! command -v pip3 &> /dev/null && ! command -v pip &> /dev/null; then + echo "Error: pip is not available. Please install Python and pip first." >&2 + return 1 + fi -INSTALL_DIR="${workdir:?}/.local/lib/aws" -BIN_LOCATION="${workdir}/bin" + # Use pip3 if available, otherwise pip + local pip_cmd="pip3" + if ! command -v pip3 &> /dev/null; then + pip_cmd="pip" + fi -mkdir -p "${BIN_LOCATION}" + echo "Installing AWS CLI using ${pip_cmd}..." + ${pip_cmd} install --user awscli -tmpdir=$(mktemp -d) -cd "${tmpdir}" + # Add ~/.local/bin to PATH if not already there (where pip --user installs) + if [[ ":$PATH:" != *":$HOME/.local/bin:"* ]]; then + export PATH="$HOME/.local/bin:$PATH" + echo "Added ~/.local/bin to PATH" + fi + + # Verify installation + if command -v aws &> /dev/null; then + echo "AWS CLI v1 installed successfully:" + aws --version + else + echo "Error: AWS CLI v1 installation failed or not found in PATH" >&2 + return 1 + fi +} + +# Main installation logic +install_aws_cli() { + local arch + arch=$(detect_architecture) + + case "${arch}" in + ppc64le|s390x) + echo "IBM architecture detected (${arch}). Using pip installation..." + install_aws_cli_pip + ;; + x86_64|aarch64|arm64) + echo "Standard architecture detected (${arch}). Using binary installation..." + install_aws_cli_binary "${arch}" + ;; + *) + echo "Warning: Unknown architecture ${arch}. Falling back to pip installation..." + install_aws_cli_pip + ;; + esac +} -echo "Downloading AWS CLI v2 for ${ARCH}..." -curl "https://awscli.amazonaws.com/awscli-exe-linux-${ARCH}.zip" -o "awscliv2.zip" -unzip awscliv2.zip &> /dev/null +install_aws_cli docker_dir="/home/${USER}/.docker" if [[ ! -d "${docker_dir}" ]]; then @@ -56,7 +121,5 @@ fi sudo chown "${USER}":"${USER}" "${docker_dir}" -R sudo chmod g+rwx "${docker_dir}" -R -sudo ./aws/install --bin-dir "${BIN_LOCATION}" --install-dir "${INSTALL_DIR}" --update -cd - -rm -rf "${tmpdir}" +echo "AWS CLI setup completed successfully." 
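A minimal sketch, not part of the patch series, of how the flavor split above can be checked after setup_aws.sh runs: pip installs AWS CLI v1 on the IBM architectures (ppc64le, s390x) while the binary installer provides v2 everywhere else. The script name and its placement are hypothetical.

# verify_aws_flavor.py - illustrative check mirroring the dispatch in setup_aws.sh
import platform
import shutil
import subprocess

def expected_major_version() -> str:
    # setup_aws.sh uses pip (v1) on IBM architectures and the official binary (v2) elsewhere
    return "1" if platform.machine() in ("ppc64le", "s390x") else "2"

def main() -> None:
    if shutil.which("aws") is None:
        raise SystemExit("aws is not on PATH - setup_aws.sh did not complete")
    result = subprocess.run(["aws", "--version"], capture_output=True, text=True)
    banner = (result.stdout + result.stderr).strip()  # older v1 builds print the banner to stderr
    expected = f"aws-cli/{expected_major_version()}"
    print(f"{banner} -> {'OK' if expected in banner else 'unexpected AWS CLI flavor'}")

if __name__ == "__main__":
    main()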
diff --git a/scripts/evergreen/setup_jq.sh b/scripts/evergreen/setup_jq.sh index e21d4a07e..23ad3e723 100755 --- a/scripts/evergreen/setup_jq.sh +++ b/scripts/evergreen/setup_jq.sh @@ -10,4 +10,6 @@ set -Eeou pipefail source scripts/dev/set_env_context.sh source scripts/funcs/install -download_and_install_binary "${PROJECT_DIR:-.}/bin" jq "https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64" +arch=$(uname -m) + +download_and_install_binary "${PROJECT_DIR:-.}/bin" jq "https://github.com/stedolan/jq/releases/download/jq-1.8.1/jq-linux-${arch}" diff --git a/scripts/evergreen/setup_minikube_host.sh b/scripts/evergreen/setup_minikube_host.sh new file mode 100755 index 000000000..eb2e7906c --- /dev/null +++ b/scripts/evergreen/setup_minikube_host.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash + +# Consolidated setup script for minikube host with multi-architecture support +# This script groups all the setup steps needed for IBM machines and other architectures +# Can be run on static hosts for testing and verification + +source scripts/dev/set_env_context.sh +set -Eeou pipefail + +echo "==========================================" +echo "Setting up minikube host with multi-architecture support" +echo "Architecture: $(uname -m)" +echo "OS: $(uname -s)" +echo "==========================================" + +# Function to run a setup step with error handling and logging +run_setup_step() { + local step_name="$1" + shift + local script_command=("$@") + + echo "" + echo ">>> Running: ${step_name}" + echo ">>> Command: ${script_command[*]}" + + local script_path="${script_command[0]}" + if [[ -f "${script_path}" ]]; then + if "${script_command[@]}"; then + echo "✅ ${step_name} completed successfully" + else + echo "❌ ${step_name} failed" + exit 1 + fi + else + echo "❌ Script not found: ${script_path}" + exit 1 + fi +} + +# Setup Python environment (needed for AWS CLI pip installation) +run_setup_step "Python Virtual Environment" "scripts/dev/recreate_python_venv.sh" "--skip-requirements" + +run_setup_step "AWS CLI Setup" "scripts/evergreen/setup_aws.sh" + +run_setup_step "kubectl and helm Setup" "scripts/evergreen/setup_kubectl.sh" + +run_setup_step "jq Setup" "scripts/evergreen/setup_jq.sh" + +run_setup_step "IBM Host Setup" "scripts/minikube/setup_minikube_host.sh" + +run_setup_step "Docker Authentication" "scripts/dev/configure_docker_auth.sh" + +echo "" +echo "==========================================" +echo "✅ Minikube host setup completed successfully!" +echo "==========================================" +echo "" +echo "Installed tools summary:" +echo "- Python: $(python --version 2>/dev/null || python3 --version 2>/dev/null || echo 'Not found')" +echo "- AWS CLI: $(aws --version 2>/dev/null || echo 'Not found')" +echo "- kubectl: $(kubectl version --client 2>/dev/null || echo 'Not found')" +echo "- helm: $(helm version --short 2>/dev/null || echo 'Not found')" +echo "- jq: $(jq --version 2>/dev/null || echo 'Not found')" +echo "- Docker: $(docker --version 2>/dev/null || echo 'Not found')" +echo "" +echo "Setup complete! Host is ready for minikube operations." 
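The setup_jq.sh change above feeds uname -m straight into the download URL, which breaks because GitHub release assets rarely use uname names verbatim; the "fix jq" patch further below introduces an explicit mapping. The same table, expressed as a Python sketch for illustration only (the mapping values are taken from that patch):

# jq_asset.py - illustrative sketch of the uname -> release-asset mapping
import platform

JQ_ASSET_ARCH = {
    "x86_64": "amd64",     # jq publishes jq-linux-amd64, not jq-linux-x86_64
    "aarch64": "arm64",
    "arm64": "arm64",
    "ppc64le": "ppc64el",  # jq uses ppc64el instead of ppc64le
    "s390x": "s390x",
}

def jq_download_url(version: str = "1.8.1") -> str:
    machine = platform.machine()
    if machine not in JQ_ASSET_ARCH:
        raise ValueError(f"Unsupported architecture for jq: {machine}")
    return f"https://github.com/stedolan/jq/releases/download/jq-{version}/jq-linux-{JQ_ASSET_ARCH[machine]}"

print(jq_download_url())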
diff --git a/scripts/minikube/minikube_host.sh b/scripts/minikube/minikube_host.sh index ab410d7bc..c383ac09c 100755 --- a/scripts/minikube/minikube_host.sh +++ b/scripts/minikube/minikube_host.sh @@ -32,16 +32,6 @@ fi kubeconfig_path="${HOME}/.operator-dev/s390-host.kubeconfig" configure() { - shift 1 - arch=${1-"$(uname -m)"} - - echo "Configuring minikube host ${S390_HOST_NAME} (${host_url}) with architecture ${arch}" - - if [[ "${cmd}" == "configure" && ! "${arch}" =~ ^(s390x|ppc64le|x86_64|aarch64)$ ]]; then - echo "'configure' command supports the following architectures: s390x, ppc64le, x86_64, aarch64" - exit 1 - fi - ssh -T -q "${host_url}" "sudo chown \$(whoami):\$(whoami) ~/.docker || true; mkdir -p ~/.docker" if [[ -f "${HOME}/.docker/config.json" ]]; then echo "Copying local ~/.docker/config.json authorization credentials to s390x host" @@ -50,7 +40,7 @@ configure() { sync - ssh -T -q "${host_url}" "cd ~/mongodb-kubernetes; scripts/dev/switch_context.sh root-context; scripts/minikube/setup_minikube_host.sh ${arch}" + ssh -T -q "${host_url}" "cd ~/mongodb-kubernetes; scripts/dev/switch_context.sh root-context; scripts/minikube/setup_minikube_host.sh " } sync() { From 433cdc17eaa8eb06ac72d150a6950fe29f528c15 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Wed, 30 Jul 2025 14:02:49 +0200 Subject: [PATCH 017/164] fix jq --- scripts/evergreen/setup_jq.sh | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/scripts/evergreen/setup_jq.sh b/scripts/evergreen/setup_jq.sh index 23ad3e723..1f260883a 100755 --- a/scripts/evergreen/setup_jq.sh +++ b/scripts/evergreen/setup_jq.sh @@ -10,6 +10,32 @@ set -Eeou pipefail source scripts/dev/set_env_context.sh source scripts/funcs/install -arch=$(uname -m) +# Detect and map architecture for jq releases +detect_jq_architecture() { + local arch + arch=$(uname -m) + + case "${arch}" in + x86_64) + echo "amd64" + ;; + aarch64|arm64) + echo "arm64" + ;; + ppc64le) + echo "ppc64el" # jq uses ppc64el instead of ppc64le + ;; + s390x) + echo "s390x" + ;; + *) + echo "Error: Unsupported architecture for jq: ${arch}" >&2 + exit 1 + ;; + esac +} -download_and_install_binary "${PROJECT_DIR:-.}/bin" jq "https://github.com/stedolan/jq/releases/download/jq-1.8.1/jq-linux-${arch}" +jq_arch=$(detect_jq_architecture) +echo "Detected architecture: $(uname -m), using jq architecture: ${jq_arch}" + +download_and_install_binary "${PROJECT_DIR:-.}/bin" jq "https://github.com/stedolan/jq/releases/download/jq-1.8.1/jq-linux-${jq_arch}" From 161252561c1f457b636d2dc8827f065fc84f0ba5 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Wed, 30 Jul 2025 15:35:54 +0200 Subject: [PATCH 018/164] fix docker, minikube etc --- .evergreen.yml | 8 +- scripts/evergreen/e2e/e2e.sh | 2 +- scripts/evergreen/setup_minikube_host.sh | 18 ++ scripts/minikube/install-docker.sh | 58 ++----- scripts/minikube/install-minikube.sh | 200 ++++------------------- scripts/minikube/setup_minikube_host.sh | 67 ++++---- 6 files changed, 106 insertions(+), 247 deletions(-) diff --git a/.evergreen.yml b/.evergreen.yml index 31655847c..98eaf8a75 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -1197,7 +1197,13 @@ task_groups: - name: e2e_smoke_ibm_task_group max_hosts: -1 <<: *setup_group_ibm - <<: *setup_and_teardown_task + setup_task_can_fail_task: true + setup_task: + - func: cleanup_exec_environment + teardown_task_can_fail_task: true + teardown_task: + - func: upload_e2e_logs + - func: teardown_kubernetes_environment tasks: - e2e_replica_set <<: 
*teardown_group diff --git a/scripts/evergreen/e2e/e2e.sh b/scripts/evergreen/e2e/e2e.sh index 12f839b86..eb6140a22 100755 --- a/scripts/evergreen/e2e/e2e.sh +++ b/scripts/evergreen/e2e/e2e.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -set -Eeou pipefail +set -Eeoux pipefail start_time=$(date +%s) diff --git a/scripts/evergreen/setup_minikube_host.sh b/scripts/evergreen/setup_minikube_host.sh index eb2e7906c..c96cfe9c5 100755 --- a/scripts/evergreen/setup_minikube_host.sh +++ b/scripts/evergreen/setup_minikube_host.sh @@ -50,6 +50,24 @@ run_setup_step "IBM Host Setup" "scripts/minikube/setup_minikube_host.sh" run_setup_step "Docker Authentication" "scripts/dev/configure_docker_auth.sh" +# Setup Kubernetes cluster after Docker is properly configured +echo "" +echo ">>> Setting up Kubernetes cluster" +if [[ "${KUBE_ENVIRONMENT_NAME:-}" == "kind" ]]; then + run_setup_step "Kind Kubernetes Cluster" "scripts/dev/recreate_kind_cluster.sh" "kind" +elif [[ "${KUBE_ENVIRONMENT_NAME:-}" == "minikube" ]]; then + echo ">>> Running: Minikube Kubernetes Cluster" + echo ">>> Command: minikube start --profile=${MINIKUBE_PROFILE:-mongodb-e2e} --driver=docker --memory=8192mb --cpus=4" + if minikube start --profile="${MINIKUBE_PROFILE:-mongodb-e2e}" --driver=docker --memory=8192mb --cpus=4; then + echo "✅ Minikube Kubernetes Cluster completed successfully" + else + echo "❌ Minikube Kubernetes Cluster failed" + exit 1 + fi +else + echo "⚠️ No Kubernetes environment specified (KUBE_ENVIRONMENT_NAME not set)" +fi + echo "" echo "==========================================" echo "✅ Minikube host setup completed successfully!" diff --git a/scripts/minikube/install-docker.sh b/scripts/minikube/install-docker.sh index 04ae3f7d8..164d94c3a 100755 --- a/scripts/minikube/install-docker.sh +++ b/scripts/minikube/install-docker.sh @@ -1,25 +1,10 @@ #!/usr/bin/env bash set -Eeou pipefail -# Script to install Docker on s390x architecture (specifically for RHEL/Ubuntu based systems) - -print_usage() { - echo "Usage: $0 [options]" - echo "Options:" - echo " -h, --help Show this help message" - echo " -u, --user Username to add to docker group (optional)" - echo "" - echo "This script installs Docker on s390x architecture systems." -} - DOCKER_USER="" while [[ $# -gt 0 ]]; do case $1 in - -h|--help) - print_usage - exit 0 - ;; -u|--user) DOCKER_USER="$2" shift 2 @@ -32,7 +17,7 @@ while [[ $# -gt 0 ]]; do esac done -echo "Installing Docker on s390x architecture..." +echo "Installing Docker" # Detect OS if [[ -f /etc/redhat-release ]]; then @@ -47,41 +32,31 @@ fi # Install Docker based on OS if [[ "$OS_TYPE" == "rhel" ]]; then echo "Detected RHEL/CentOS system..." - - # Remove any existing Docker packages - sudo yum remove -y docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-engine || true - - # Install required packages (some may not exist on newer RHEL versions) - sudo yum install -y yum-utils || echo "yum-utils already installed or unavailable" - sudo yum install -y device-mapper-persistent-data lvm2 || echo "device-mapper packages may not be available on this system" - + # Add Docker repository sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo - + # Install Docker CE sudo yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin - + elif [[ "$OS_TYPE" == "debian" ]]; then echo "Detected Ubuntu/Debian system..." 
- - # Remove any existing Docker packages - sudo apt-get remove -y docker docker-engine docker.io containerd runc || true - + # Update package index sudo apt-get update - + # Install required packages sudo apt-get install -y apt-transport-https ca-certificates curl gnupg lsb-release - + # Add Docker's official GPG key curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg - + # Set up stable repository echo "deb [arch=s390x signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null - + # Update package index again sudo apt-get update - + # Install Docker CE sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin fi @@ -100,9 +75,12 @@ fi # Verify installation echo "Verifying Docker installation..." sudo docker --version -sudo docker run --rm hello-world -echo "Docker installation completed successfully!" -echo "" -echo "If you added a user to the docker group, they need to log out and log back in." -echo "You can also run 'newgrp docker' to apply the group membership in the current session." \ No newline at end of file +# Test docker access with newgrp (temporary group membership) +echo "Testing Docker access..." +if newgrp docker -c 'docker ps' >/dev/null 2>&1; then + echo "✅ Docker access confirmed" +else + echo "⚠️ Docker group membership requires logout/login to take effect" + echo "Continuing with setup..." +fi diff --git a/scripts/minikube/install-minikube.sh b/scripts/minikube/install-minikube.sh index 3e6dd4e13..903351960 100755 --- a/scripts/minikube/install-minikube.sh +++ b/scripts/minikube/install-minikube.sh @@ -1,73 +1,35 @@ #!/usr/bin/env bash set -Eeou pipefail -# Script to install and configure minikube on s390x architecture - -print_usage() { - echo "Usage: $0 [options]" - echo "Options:" - echo " -h, --help Show this help message" - echo " -v, --version VERSION Minikube version to install (default: latest)" - echo " -k, --kubernetes VER Kubernetes version (default: latest stable)" - echo " -m, --memory MEMORY Memory allocation (default: 8192mb)" - echo " -c, --cpus CPUS CPU allocation (default: 4)" - echo " --profile PROFILE Minikube profile name (default: minikube)" - echo " --start Start minikube after installation" - echo "" - echo "This script installs minikube on s390x architecture and configures it for MongoDB Kubernetes e2e testing." -} - -MINIKUBE_VERSION="latest" -K8S_VERSION="" -MEMORY="8192" -CPUS="4" -PROFILE="minikube" -START_MINIKUBE="false" - -while [[ $# -gt 0 ]]; do - case $1 in - -h|--help) - print_usage - exit 0 - ;; - -v|--version) - MINIKUBE_VERSION="$2" - shift 2 - ;; - -k|--kubernetes) - K8S_VERSION="$2" - shift 2 - ;; - -m|--memory) - MEMORY="$2" - shift 2 - ;; - -c|--cpus) - CPUS="$2" - shift 2 - ;; - --profile) - PROFILE="$2" - shift 2 - ;; - --start) - START_MINIKUBE="true" - shift - ;; - *) - echo "Unknown option: $1" - print_usage - exit 1 - ;; - esac -done - -echo "Installing minikube on s390x architecture..." 
+source scripts/dev/set_env_context.sh + +# Detect architecture +ARCH=$(uname -m) +case "${ARCH}" in + x86_64) + MINIKUBE_ARCH="amd64" + ;; + aarch64) + MINIKUBE_ARCH="arm64" + ;; + ppc64le) + MINIKUBE_ARCH="ppc64le" + ;; + s390x) + MINIKUBE_ARCH="s390x" + ;; + *) + echo "Error: Unsupported architecture: ${ARCH}" + echo "Supported architectures: x86_64, aarch64, ppc64le, s390x" + exit 1 + ;; +esac + +echo "Installing minikube on ${ARCH} architecture..." # Verify Docker is installed if ! command -v docker &> /dev/null; then echo "Error: Docker is required but not installed. Please install Docker first." - echo "You can use the install-docker.sh script in this directory." exit 1 fi @@ -77,114 +39,12 @@ if ! docker info &> /dev/null; then exit 1 fi -# Install kubectl if not present -if ! command -v kubectl &> /dev/null; then - echo "Installing kubectl..." - - # Get the latest kubectl version if not specified - if [[ -z "$K8S_VERSION" ]]; then - K8S_VERSION=$(curl -L -s https://dl.k8s.io/release/stable.txt) - fi - - # Download kubectl for s390x - curl -LO "https://dl.k8s.io/release/${K8S_VERSION}/bin/linux/s390x/kubectl" - - # Verify the binary - curl -LO "https://dl.k8s.io/${K8S_VERSION}/bin/linux/s390x/kubectl.sha256" - echo "$(cat kubectl.sha256) kubectl" | sha256sum --check - - # Install kubectl - sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl - rm -f kubectl kubectl.sha256 - - echo "kubectl installed successfully" -fi - # Install minikube -echo "Installing minikube..." - -if [[ "$MINIKUBE_VERSION" == "latest" ]]; then - # Get the latest minikube version - MINIKUBE_VERSION=$(curl -s https://api.github.com/repos/kubernetes/minikube/releases/latest | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/') -fi - -# Download minikube for s390x -curl -Lo minikube "https://github.com/kubernetes/minikube/releases/download/${MINIKUBE_VERSION}/minikube-linux-s390x" +echo "Installing minikube for ${ARCH}..." -# Make it executable and install -chmod +x minikube -sudo install minikube /usr/local/bin/ +MINIKUBE_VERSION=$(curl -s https://api.github.com/repos/kubernetes/minikube/releases/latest | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/') -# Clean up -rm -f minikube - -echo "Minikube ${MINIKUBE_VERSION} installed successfully" - -# Configure minikube for MongoDB Kubernetes testing -echo "Configuring minikube for MongoDB Kubernetes e2e testing..." - -# Set default driver to docker -minikube config set driver docker - -# Configure resource limits -minikube config set memory "${MEMORY}mb" -minikube config set cpus "${CPUS}" - -# Enable required addons for testing -ADDONS=( - "storage-provisioner" - "default-storageclass" - "volumesnapshots" - "csi-hostpath-driver" -) - -echo "Minikube configuration completed." - -if [[ "$START_MINIKUBE" == "true" ]]; then - echo "Starting minikube cluster with profile '${PROFILE}'..." - - # Start minikube with specific configuration for MongoDB testing - minikube start \ - --profile="${PROFILE}" \ - --driver=docker \ - --memory="${MEMORY}mb" \ - --cpus="${CPUS}" \ - --disk-size=50g \ - --extra-config=kubelet.authentication-token-webhook=true \ - --extra-config=kubelet.authorization-mode=Webhook \ - --extra-config=scheduler.bind-address=0.0.0.0 \ - --extra-config=controller-manager.bind-address=0.0.0.0 \ - ${K8S_VERSION:+--kubernetes-version=$K8S_VERSION} - - # Wait for cluster to be ready - echo "Waiting for cluster to be ready..." 
- kubectl wait --for=condition=Ready nodes --all --timeout=300s - - # Enable addons - for addon in "${ADDONS[@]}"; do - echo "Enabling addon: $addon" - minikube addons enable "$addon" --profile="${PROFILE}" || true - done - - # Create directories that MongoDB tests expect (similar to kind setup) - echo "Setting up test directories..." - minikube ssh --profile="${PROFILE}" -- 'sudo mkdir -p /opt/data/mongo-data-{0..2} /opt/data/mongo-logs-{0..2}' - minikube ssh --profile="${PROFILE}" -- 'sudo chmod 777 /opt/data/mongo-data-* /opt/data/mongo-logs-*' - - echo "Minikube cluster started successfully!" - echo "" - echo "To use this cluster:" - echo " export KUBECONFIG=\$(minikube kubeconfig --profile=${PROFILE})" - echo " kubectl get nodes" - echo "" - echo "To stop the cluster:" - echo " minikube stop --profile=${PROFILE}" -else - echo "" - echo "Minikube installed but not started." - echo "To start minikube later, run:" - echo " minikube start --profile=${PROFILE} --driver=docker --memory=${MEMORY}mb --cpus=${CPUS}" -fi +# Download minikube for detected architecture +download_and_install_binary "${PROJECT_DIR:-.}/bin" minikube "https://github.com/kubernetes/minikube/releases/download/${MINIKUBE_VERSION}/minikube-linux-${MINIKUBE_ARCH}" -echo "" -echo "Installation completed successfully!" \ No newline at end of file +echo "Minikube ${MINIKUBE_VERSION} installed successfully for ${ARCH}" diff --git a/scripts/minikube/setup_minikube_host.sh b/scripts/minikube/setup_minikube_host.sh index 7628c388c..a8c4cf70c 100755 --- a/scripts/minikube/setup_minikube_host.sh +++ b/scripts/minikube/setup_minikube_host.sh @@ -65,51 +65,48 @@ download_minikube() { download_docker() { echo "Installing Docker for ${ARCH}..." + scripts/minikube/install-docker.sh --user "$(whoami)" } -# Setup group for minikube based on architecture -setup_group() { - echo "Setting up group configuration for ${ARCH}..." - - # Set SETUP_GROUP evergreen variable based on architecture - case "${ARCH}" in - s390x) - export SETUP_GROUP="s390x_minikube" - ;; - ppc64le) - export SETUP_GROUP="ppc64le_minikube" - ;; - x86_64) - export SETUP_GROUP="x86_64_minikube" - ;; - aarch64) - export SETUP_GROUP="arm64_minikube" - ;; - *) - export SETUP_GROUP="unknown_minikube" - ;; - esac - - echo "SETUP_GROUP set to: ${SETUP_GROUP}" - - # Add to bashrc for persistence - echo "export SETUP_GROUP=${SETUP_GROUP}" >> ~/.bashrc +start_minikube() { + echo "Starting minikube cluster..." + local profile=${MINIKUBE_PROFILE:-mongodb-e2e} + + if minikube start --profile="${profile}" --driver=docker --memory=8192mb --cpus=4; then + echo "✅ Minikube cluster started successfully" + + # Test cluster connectivity + if kubectl --kubeconfig="$(minikube kubeconfig --profile="${profile}")" get nodes >/dev/null 2>&1; then + echo "✅ Cluster connectivity verified" + else + echo "⚠️ Cluster connectivity test failed - may need manual intervention" + fi + else + echo "⚠️ Minikube start failed - likely due to docker permissions" + echo "This will be resolved after logout/login" + fi } check_disk_space set_limits -setup_group - -download_minikube & download_docker & +download_minikube & wait -echo "Setting up minikube environment variables..." 
-echo 'export KUBE_ENVIRONMENT_NAME=minikube' >> ~/.bashrc
-echo 'export MINIKUBE_PROFILE=${MINIKUBE_PROFILE:-mongodb-e2e}' >> ~/.bashrc
-echo 'export KUBECONFIG=$(minikube kubeconfig --profile=${MINIKUBE_PROFILE:-mongodb-e2e} 2>/dev/null || echo ~/.kube/config)' >> ~/.bashrc
+
+# Start minikube cluster
+start_minikube
 
 echo "Minikube host setup completed successfully for ${ARCH}!"
-echo "SETUP_GROUP: ${SETUP_GROUP}"
+
+# Final status
+echo ""
+echo "=========================================="
+echo "✅ Setup Summary"
+echo "=========================================="
+echo "Architecture: ${ARCH}"
+echo "Minikube Profile: ${MINIKUBE_PROFILE:-mongodb-e2e}"
+echo ""
+echo "If docker permissions failed, logout and login, then run:"
+echo "  minikube start --profile=\${MINIKUBE_PROFILE:-mongodb-e2e} --driver=docker --memory=8192mb --cpus=4"

From bf8e64ae74eacb2a6721418cd9fb63de64cc1a03 Mon Sep 17 00:00:00 2001
From: Nam Nguyen
Date: Wed, 30 Jul 2025 15:57:20 +0200
Subject: [PATCH 019/164] remove docker group management

---
 scripts/evergreen/setup_minikube_host.sh | 21 +++++++--------------
 scripts/minikube/install-docker.sh       | 11 ++++++---
 scripts/minikube/setup_minikube_host.sh  | 29 ++----------------------
 3 files changed, 17 insertions(+), 44 deletions(-)

diff --git a/scripts/evergreen/setup_minikube_host.sh b/scripts/evergreen/setup_minikube_host.sh
index c96cfe9c5..36aa423ee 100755
--- a/scripts/evergreen/setup_minikube_host.sh
+++ b/scripts/evergreen/setup_minikube_host.sh
@@ -53,17 +53,10 @@ run_setup_step "Docker Authentication" "scripts/dev/configure_docker_auth.sh"
 # Setup Kubernetes cluster after Docker is properly configured
 echo ""
 echo ">>> Setting up Kubernetes cluster"
-if [[ "${KUBE_ENVIRONMENT_NAME:-}" == "kind" ]]; then
-    run_setup_step "Kind Kubernetes Cluster" "scripts/dev/recreate_kind_cluster.sh" "kind"
-elif [[ "${KUBE_ENVIRONMENT_NAME:-}" == "minikube" ]]; then
-    echo ">>> Running: Minikube Kubernetes Cluster"
-    echo ">>> Command: minikube start --profile=${MINIKUBE_PROFILE:-mongodb-e2e} --driver=docker --memory=8192mb --cpus=4"
-    if minikube start --profile="${MINIKUBE_PROFILE:-mongodb-e2e}" --driver=docker --memory=8192mb --cpus=4; then
-        echo "✅ Minikube Kubernetes Cluster completed successfully"
-    else
-        echo "❌ Minikube Kubernetes Cluster failed"
-        exit 1
-    fi
-else
-    echo "⚠️ No Kubernetes environment specified (KUBE_ENVIRONMENT_NAME not set)"
-fi
+echo ">>> Command: minikube start --profile=${MINIKUBE_PROFILE:-mongodb-e2e} --driver=docker --memory=8192mb --cpus=4"
+if minikube start --profile="${MINIKUBE_PROFILE:-mongodb-e2e}" --driver=docker --memory=8192mb --cpus=4; then
+    echo "✅ Minikube Kubernetes Cluster completed successfully"
+else
+    echo "❌ Minikube Kubernetes Cluster failed"
+    exit 1
+fi
diff --git a/scripts/minikube/install-docker.sh b/scripts/minikube/install-docker.sh
index 164d94c3a..f645a6a97 100755
--- a/scripts/minikube/install-docker.sh
+++ b/scripts/minikube/install-docker.sh
@@ -76,11 +76,16 @@ fi
 echo "Verifying Docker installation..."
 sudo docker --version
 
-# Test docker access with newgrp (temporary group membership)
+# Test docker access
 echo "Testing Docker access..."
-if newgrp docker -c 'docker ps' >/dev/null 2>&1; then
+if docker ps >/dev/null 2>&1; then
     echo "✅ Docker access confirmed"
 else
-    echo "⚠️ Docker group membership requires logout/login to take effect"
+    echo "⚠️ Docker access test failed - checking if running as root..."
+ if [[ $(id -u) -eq 0 ]]; then + echo "Running as root - Docker should work" + else + echo "Docker group membership may require logout/login to take effect" + fi echo "Continuing with setup..." fi diff --git a/scripts/minikube/setup_minikube_host.sh b/scripts/minikube/setup_minikube_host.sh index a8c4cf70c..bd2ac72bf 100755 --- a/scripts/minikube/setup_minikube_host.sh +++ b/scripts/minikube/setup_minikube_host.sh @@ -1,6 +1,7 @@ #!/usr/bin/env bash # this script downloads necessary tooling for alternative architectures (s390x, ppc64le) using minikube (similar to setup_evg_host.sh) +source scripts/dev/set_env_context.sh set -Eeou pipefail @@ -65,27 +66,7 @@ download_minikube() { download_docker() { echo "Installing Docker for ${ARCH}..." - - scripts/minikube/install-docker.sh --user "$(whoami)" -} - -start_minikube() { - echo "Starting minikube cluster..." - local profile=${MINIKUBE_PROFILE:-mongodb-e2e} - - if minikube start --profile="${profile}" --driver=docker --memory=8192mb --cpus=4; then - echo "✅ Minikube cluster started successfully" - - # Test cluster connectivity - if kubectl --kubeconfig="$(minikube kubeconfig --profile="${profile}")" get nodes >/dev/null 2>&1; then - echo "✅ Cluster connectivity verified" - else - echo "⚠️ Cluster connectivity test failed - may need manual intervention" - fi - else - echo "⚠️ Minikube start failed - likely due to docker permissions" - echo "This will be resolved after logout/login" - fi + scripts/minikube/install-docker.sh } check_disk_space @@ -95,9 +76,6 @@ download_minikube & wait -# Start minikube cluster -start_minikube - echo "Minikube host setup completed successfully for ${ARCH}!" # Final status @@ -107,6 +85,3 @@ echo "✅ Setup Summary" echo "==========================================" echo "Architecture: ${ARCH}" echo "Minikube Profile: ${MINIKUBE_PROFILE:-mongodb-e2e}" -echo "" -echo "If docker permissions failed, logout and login, then run:" -echo " minikube start --profile=\${MINIKUBE_PROFILE:-mongodb-e2e} --driver=docker --memory=8192mb --cpus=4" From fb87f4d6e4bacdd55b4663563d25bb101ccb2d9b Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 30 Jul 2025 16:59:44 +0200 Subject: [PATCH 020/164] Fix agent version for default case --- scripts/release/atomic_pipeline.py | 31 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 16 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 3653e7b27..afa3fda41 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -9,6 +9,7 @@ from concurrent.futures import ProcessPoolExecutor from queue import Queue from typing import Callable, Dict, List, Optional, Tuple, Union +from copy import copy import requests import semver @@ -37,6 +38,7 @@ # TODO: rename architecture -> platform everywhere + def make_list_of_str(value: Union[None, str, List[str]]) -> List[str]: if value is None: return [] @@ -485,15 +487,14 @@ def build_community_image(build_configuration: BuildConfiguration, image_type: s "version": version, "GOLANG_VERSION": golang_version, "architecture": arch, - "TARGETARCH": arch, + "TARGETARCH": arch, # TODO: redundant ? 
} multi_arch_args_list.append(arch_args) # Create a copy of build_configuration with overridden platforms - from copy import copy build_config_copy = copy(build_configuration) build_config_copy.platforms = platforms - + build_image_generic( image_name=image_name, dockerfile_path=dockerfile_path, @@ -525,10 +526,13 @@ def build_agent_pipeline( mongodb_agent_url_ubi: str, agent_version, ): - version = f"{agent_version}_{image_version}" - + build_configuration_copy = copy(build_configuration) + build_configuration_copy.version = image_version + print( + f"======== Building agent pipeline for version {image_version}, build configuration version: {build_configuration.version}" + ) args = { - "version": version, + "version": image_version, "agent_version": agent_version, "ubi_suffix": "-ubi", "release_version": image_version, @@ -541,7 +545,7 @@ def build_agent_pipeline( build_image_generic( image_name="mongodb-agent-ubi", dockerfile_path="docker/mongodb-agent/Dockerfile", - build_configuration=build_configuration, + build_configuration=build_configuration_copy, extra_args=args, ) @@ -596,6 +600,7 @@ def build_multi_arch_agent_in_sonar( multi_arch_args_list=joined_args, ) + # TODO: why versions are wrong -> 13.35.0.9498-1_13.35.0.9498-1_6874c19d2aab5d0007820c51 ; duplicate # TODO: figure out why I hit toomanyrequests: Rate exceeded with the new pipeline def build_agent_default_case(build_configuration: BuildConfiguration): @@ -625,18 +630,11 @@ def build_agent_default_case(build_configuration: BuildConfiguration): max_workers = build_configuration.parallel_factor with ProcessPoolExecutor(max_workers=max_workers) as executor: logger.info(f"running with factor of {max_workers}") + print(f"======= Versions to build {agent_versions_to_build} =======") for agent_version in agent_versions_to_build: # We don't need to keep create and push the same image on every build. # It is enough to create and push the non-operator suffixed images only during releases to ecr and quay. 
- # if build_configuration.is_release_step_executed() or build_configuration.all_agents: - # tasks_queue.put( - # executor.submit( - # build_multi_arch_agent_in_sonar, - # build_configuration, - # agent_version[0], - # agent_version[1], - # ) - # ) + print(f"======= Building Agent {agent_version} =======") _build_agent_operator( agent_version, build_configuration, @@ -648,6 +646,7 @@ def build_agent_default_case(build_configuration: BuildConfiguration): queue_exception_handling(tasks_queue) + # TODO: for now, release agents ECR release versions with image:version_version (duplicated) def build_agent_on_agent_bump(build_configuration: BuildConfiguration): """ From c05e1806b7a9bc15ecaf63ae233511cad23ec1a3 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 30 Jul 2025 17:00:03 +0200 Subject: [PATCH 021/164] Lindt --- scripts/release/build_context.py | 13 ++--- scripts/release/build_images.py | 86 ++++++++++++++++---------------- scripts/release/main.py | 10 ++-- 3 files changed, 57 insertions(+), 52 deletions(-) diff --git a/scripts/release/build_context.py b/scripts/release/build_context.py index f163c3818..04f97f84d 100644 --- a/scripts/release/build_context.py +++ b/scripts/release/build_context.py @@ -29,7 +29,9 @@ def infer_scenario_from_environment(cls) -> "BuildScenario": scenario = BuildScenario.PATCH logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})") elif is_evg: - scenario = BuildScenario.MASTER # TODO: ultimately we won't have RELEASE variant and master will push to staging + scenario = ( + BuildScenario.MASTER + ) # TODO: ultimately we won't have RELEASE variant and master will push to staging logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})") else: scenario = BuildScenario.DEVELOPMENT @@ -55,15 +57,15 @@ def from_scenario(cls, scenario: BuildScenario) -> "BuildContext": git_tag = os.getenv("triggered_by_git_tag") patch_id = os.getenv("version_id") signing_enabled = scenario == BuildScenario.RELEASE - + return cls( scenario=scenario, git_tag=git_tag, patch_id=patch_id, signing_enabled=signing_enabled, - version=git_tag or patch_id, #TODO: update this + version=git_tag or patch_id, # TODO: update this ) - + def get_version(self) -> str: """Gets the version that will be used to tag the images.""" if self.scenario == BuildScenario.RELEASE: @@ -71,11 +73,10 @@ def get_version(self) -> str: if self.patch_id: return self.patch_id return "latest" - + def get_base_registry(self) -> str: """Get the base registry URL for the current scenario.""" if self.scenario == BuildScenario.RELEASE: return os.environ.get("STAGING_REPO_URL") else: return os.environ.get("BASE_REPO_URL") - diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py index 66e6b0d3a..c4b19ab34 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build_images.py @@ -15,6 +15,7 @@ from lib.sonar.sonar import create_ecr_repository from scripts.evergreen.release.images_signing import sign_image, verify_signature + # TODO: self review the PR def ecr_login_boto3(region: str, account_id: str): """ @@ -45,36 +46,38 @@ def ecr_login_boto3(region: str, account_id: str): # TODO: don't do it every time ? Check for existence without relying on Exception def ensure_buildx_builder(builder_name: str = "multiarch") -> str: - """ - Ensures a Docker Buildx builder exists for multi-platform builds. 
- - :param builder_name: Name for the buildx builder - :return: The builder name that was created or reused - """ - docker = python_on_whales.docker - - try: - docker.buildx.create( - name=builder_name, - driver="docker-container", - use=True, - bootstrap=True, - ) - logger.info(f"Created new buildx builder: {builder_name}") - except DockerException as e: - if f'existing instance for "{builder_name}"' in str(e): - logger.info(f"Builder '{builder_name}' already exists – reusing it.") - # Make sure it's the current one: - docker.buildx.use(builder_name) - else: - # Some other failure happened - logger.error(f"Failed to create buildx builder: {e}") - raise - - return builder_name - - -def build_image(tag: str, dockerfile: str, path: str, args: Dict[str, str] = {}, push: bool = True, platforms: list[str] = None): + """ + Ensures a Docker Buildx builder exists for multi-platform builds. + + :param builder_name: Name for the buildx builder + :return: The builder name that was created or reused + """ + docker = python_on_whales.docker + + try: + docker.buildx.create( + name=builder_name, + driver="docker-container", + use=True, + bootstrap=True, + ) + logger.info(f"Created new buildx builder: {builder_name}") + except DockerException as e: + if f'existing instance for "{builder_name}"' in str(e): + logger.info(f"Builder '{builder_name}' already exists – reusing it.") + # Make sure it's the current one: + docker.buildx.use(builder_name) + else: + # Some other failure happened + logger.error(f"Failed to create buildx builder: {e}") + raise + + return builder_name + + +def build_image( + tag: str, dockerfile: str, path: str, args: Dict[str, str] = {}, push: bool = True, platforms: list[str] = None +): """ Build a Docker image using python_on_whales and Docker Buildx for multi-architecture support. 
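
# For reference, the build_image() helper above drives Docker Buildx through
# python_on_whales; a roughly equivalent raw CLI flow is sketched below. The
# builder name, tag, platform list and dockerfile here are illustrative only,
# not values taken from the pipeline:
docker buildx create --name multiarch --driver docker-container --bootstrap --use
docker buildx build \
    --platform linux/amd64,linux/arm64 \
    --file docker/mongodb-agent/Dockerfile \
    --tag "${BASE_REPO_URL}/mongodb-agent-ubi:latest" \
    --push \
    .
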
@@ -86,25 +89,25 @@ def build_image(tag: str, dockerfile: str, path: str, args: Dict[str, str] = {}, :param platforms: List of target platforms (e.g., ["linux/amd64", "linux/arm64"]) """ docker = python_on_whales.docker - + try: # Convert build args to the format expected by python_on_whales build_args = {k: str(v) for k, v in args.items()} if args else {} - + # Set default platforms if not specified if platforms is None: platforms = ["linux/amd64"] - + logger.info(f"Building image: {tag}") logger.info(f"Platforms: {platforms}") logger.info(f"Dockerfile: {dockerfile}") logger.info(f"Build context: {path}") logger.debug(f"Build args: {build_args}") - + # Use buildx for multi-platform builds if len(platforms) > 1: logger.info(f"Multi-platform build for {len(platforms)} architectures") - + # We need a special driver to handle multi platform builds builder_name = ensure_buildx_builder("multiarch") @@ -117,18 +120,17 @@ def build_image(tag: str, dockerfile: str, path: str, args: Dict[str, str] = {}, builder=builder_name, build_args=build_args, push=push, - provenance=False, # To not get an untagged image for single platform builds + provenance=False, # To not get an untagged image for single platform builds pull=False, # Don't always pull base images ) - + logger.info(f"Successfully built {'and pushed' if push else ''} {tag}") - + except Exception as e: logger.error(f"Failed to build image {tag}: {e}") raise RuntimeError(f"Failed to build image {tag}: {str(e)}") - def process_image( image_name: str, image_tag: str, @@ -141,7 +143,7 @@ def process_image( push: bool = True, ): # Login to ECR using boto3 - ecr_login_boto3(region="us-east-1", account_id="268558157000") # TODO: use environment variables + ecr_login_boto3(region="us-east-1", account_id="268558157000") # TODO: use environment variables # Helper to automatically create registry with correct name should_create_repo = False @@ -157,7 +159,7 @@ def process_image( docker_registry = f"{base_registry}/{image_name}" image_full_uri = f"{docker_registry}:{image_tag}" - + # Build image with docker buildx build_image( tag=image_full_uri, @@ -165,7 +167,7 @@ def process_image( path=build_path, args=dockerfile_args, push=push, - platforms=platforms + platforms=platforms, ) if sign: diff --git a/scripts/release/main.py b/scripts/release/main.py index c3155b044..3a7e4a5f5 100644 --- a/scripts/release/main.py +++ b/scripts/release/main.py @@ -115,7 +115,7 @@ def main(): _setup_tracing() parser = argparse.ArgumentParser(description="Build container images.") - parser.add_argument("image", help="Image to build.") # Required + parser.add_argument("image", help="Image to build.") # Required parser.add_argument("--parallel", action="store_true", help="Build images in parallel.") parser.add_argument("--debug", action="store_true", help="Enable debug logging.") parser.add_argument("--sign", action="store_true", help="Sign images.") @@ -138,7 +138,7 @@ def main(): "--registry", help="Override the base registry instead of resolving from build scenario", ) - + # Agent specific arguments parser.add_argument( "--all-agents", @@ -172,7 +172,9 @@ def build_config_from_args(args): platforms = [p.strip() for p in args.platform.split(",")] SUPPORTED_PLATFORMS = ["linux/amd64", "linux/arm64"] if any(p not in SUPPORTED_PLATFORMS for p in platforms): - logger.error(f"Unsupported platform in '{args.platform}'. Supported platforms: {', '.join(SUPPORTED_PLATFORMS)}") + logger.error( + f"Unsupported platform in '{args.platform}'. 
Supported platforms: {', '.join(SUPPORTED_PLATFORMS)}" + ) sys.exit(1) # Centralized configuration management with overrides @@ -191,7 +193,7 @@ def build_config_from_args(args): version=version, base_registry=registry, parallel=args.parallel, - debug=args.debug, # TODO: is debug used ? + debug=args.debug, # TODO: is debug used ? platforms=platforms, sign=sign, all_agents=all_agents, From 747c4ba9e132e0ef545a3b57192514be44e6a9bb Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 30 Jul 2025 17:02:06 +0200 Subject: [PATCH 022/164] isort --- scripts/release/atomic_pipeline.py | 3 +-- scripts/release/build_images.py | 7 +++---- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index afa3fda41..11de90490 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -7,16 +7,15 @@ import os import shutil from concurrent.futures import ProcessPoolExecutor +from copy import copy from queue import Queue from typing import Callable, Dict, List, Optional, Tuple, Union -from copy import copy import requests import semver from opentelemetry import trace from packaging.version import Version - from lib.base_logger import logger from scripts.evergreen.release.agent_matrix import ( get_supported_operator_versions, diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py index c4b19ab34..5e1c1cd0d 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build_images.py @@ -1,14 +1,13 @@ # This file is the new Sonar import base64 import sys -from typing import Dict - -import python_on_whales -from python_on_whales.exceptions import DockerException import time +from typing import Dict import boto3 +import python_on_whales from botocore.exceptions import BotoCoreError, ClientError +from python_on_whales.exceptions import DockerException import docker from lib.base_logger import logger From 03fd9b8dcbcd1ce8e770d08869c3faf385a973f1 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 30 Jul 2025 17:18:27 +0200 Subject: [PATCH 023/164] Cleanup TODOs --- scripts/release/atomic_pipeline.py | 6 ++---- scripts/release/build_context.py | 4 ++-- scripts/release/build_images.py | 15 +-------------- 3 files changed, 5 insertions(+), 20 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 11de90490..f1e643f06 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -594,14 +594,13 @@ def build_multi_arch_agent_in_sonar( build_image_generic( image_name="mongodb-agent-ubi", dockerfile_path="docker/mongodb-agent-non-matrix/Dockerfile", - build_configuration=build_config_copy, + build_configuration=build_config_copy, #TODO: why ? is_multi_arch=True, multi_arch_args_list=joined_args, ) -# TODO: why versions are wrong -> 13.35.0.9498-1_13.35.0.9498-1_6874c19d2aab5d0007820c51 ; duplicate -# TODO: figure out why I hit toomanyrequests: Rate exceeded with the new pipeline +# TODO: Observed rate limiting (429) sometimes for agent builds in patches def build_agent_default_case(build_configuration: BuildConfiguration): """ Build the agent only for the latest operator for patches and operator releases. 
@@ -646,7 +645,6 @@ def build_agent_default_case(build_configuration: BuildConfiguration): queue_exception_handling(tasks_queue) -# TODO: for now, release agents ECR release versions with image:version_version (duplicated) def build_agent_on_agent_bump(build_configuration: BuildConfiguration): """ Build the agent matrix (operator version x agent version), triggered by PCT. diff --git a/scripts/release/build_context.py b/scripts/release/build_context.py index 04f97f84d..c083b1f0a 100644 --- a/scripts/release/build_context.py +++ b/scripts/release/build_context.py @@ -23,7 +23,7 @@ def infer_scenario_from_environment(cls) -> "BuildScenario": patch_id = os.getenv("version_id") if git_tag: - scenario = BuildScenario.RELEASE + scenario = BuildScenario.RELEASE # TODO: git tag won't trigger the pipeline, only the promotion process logger.info(f"Build scenario: {scenario} (git_tag: {git_tag})") elif is_patch: scenario = BuildScenario.PATCH @@ -31,7 +31,7 @@ def infer_scenario_from_environment(cls) -> "BuildScenario": elif is_evg: scenario = ( BuildScenario.MASTER - ) # TODO: ultimately we won't have RELEASE variant and master will push to staging + ) # TODO: MASTER -> Staging logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})") else: scenario = BuildScenario.DEVELOPMENT diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py index 5e1c1cd0d..823d187b4 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build_images.py @@ -15,7 +15,6 @@ from scripts.evergreen.release.images_signing import sign_image, verify_signature -# TODO: self review the PR def ecr_login_boto3(region: str, account_id: str): """ Fetches an auth token from ECR via boto3 and logs @@ -43,7 +42,7 @@ def ecr_login_boto3(region: str, account_id: str): logger.debug(f"ECR login succeeded: {status}") -# TODO: don't do it every time ? Check for existence without relying on Exception +# TODO: use builders = docker.buildx.list() instead of an exception def ensure_buildx_builder(builder_name: str = "multiarch") -> str: """ Ensures a Docker Buildx builder exists for multi-platform builds. 
@@ -144,18 +143,6 @@ def process_image( # Login to ECR using boto3 ecr_login_boto3(region="us-east-1", account_id="268558157000") # TODO: use environment variables - # Helper to automatically create registry with correct name - should_create_repo = False - if should_create_repo: - repo_to_create = "julienben/staging-temp/" + image_name - logger.debug(f"repo_to_create: {repo_to_create}") - create_ecr_repository(repo_to_create) - logger.info(f"Created repository {repo_to_create}") - - # Set default platforms if none provided TODO: remove from here and do it at higher level later - if platforms is None: - platforms = ["linux/amd64"] - docker_registry = f"{base_registry}/{image_name}" image_full_uri = f"{docker_registry}:{image_tag}" From 540b4200a160fea89a7837b3fbd19b3b9e6b6379 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Wed, 30 Jul 2025 18:07:25 +0200 Subject: [PATCH 024/164] installation works, minikube starts but docker doesn't start --- scripts/evergreen/setup_minikube_host.sh | 7 ++--- scripts/minikube/install-docker.sh | 40 +++++++++--------------- scripts/minikube/install-minikube.sh | 14 +-------- scripts/minikube/setup_minikube_host.sh | 32 +++++++++++++++++++ 4 files changed, 50 insertions(+), 43 deletions(-) diff --git a/scripts/evergreen/setup_minikube_host.sh b/scripts/evergreen/setup_minikube_host.sh index 36aa423ee..ffc571d3b 100755 --- a/scripts/evergreen/setup_minikube_host.sh +++ b/scripts/evergreen/setup_minikube_host.sh @@ -5,7 +5,7 @@ # Can be run on static hosts for testing and verification source scripts/dev/set_env_context.sh -set -Eeou pipefail +set -Eeoux pipefail echo "==========================================" echo "Setting up minikube host with multi-architecture support" @@ -54,15 +54,14 @@ run_setup_step "Docker Authentication" "scripts/dev/configure_docker_auth.sh" echo "" echo ">>> Setting up Kubernetes cluster" echo ">>> Command: minikube start --profile=${MINIKUBE_PROFILE:-mongodb-e2e} --driver=docker --memory=8192mb --cpus=4" + +# Start minikube cluster for CI if minikube start --profile="${MINIKUBE_PROFILE:-mongodb-e2e}" --driver=docker --memory=8192mb --cpus=4; then echo "✅ Minikube Kubernetes Cluster completed successfully" else echo "❌ Minikube Kubernetes Cluster failed" exit 1 fi -else - echo "⚠️ No Kubernetes environment specified (KUBE_ENVIRONMENT_NAME not set)" -fi echo "" echo "==========================================" diff --git a/scripts/minikube/install-docker.sh b/scripts/minikube/install-docker.sh index f645a6a97..770c75e5a 100755 --- a/scripts/minikube/install-docker.sh +++ b/scripts/minikube/install-docker.sh @@ -1,21 +1,7 @@ #!/usr/bin/env bash set -Eeou pipefail -DOCKER_USER="" - -while [[ $# -gt 0 ]]; do - case $1 in - -u|--user) - DOCKER_USER="$2" - shift 2 - ;; - *) - echo "Unknown option: $1" - print_usage - exit 1 - ;; - esac -done +source scripts/dev/set_env_context.sh echo "Installing Docker" @@ -65,11 +51,15 @@ fi sudo systemctl start docker sudo systemctl enable docker -# Add user to docker group if specified -if [[ -n "$DOCKER_USER" ]]; then - echo "Adding user '$DOCKER_USER' to docker group..." - sudo usermod -aG docker "$DOCKER_USER" - echo "Note: User '$DOCKER_USER' needs to log out and log back in for group membership to take effect." +# Add current CI user to docker group and change socket permissions +current_user=$(whoami) +if [[ "$current_user" != "root" ]]; then + echo "Adding CI user '$current_user' to docker group..." 
+ sudo usermod -aG docker "$current_user" + + # For CI: Change docker socket permissions to allow immediate access + echo "Setting docker socket permissions for CI..." + sudo chmod 666 /var/run/docker.sock fi # Verify installation @@ -81,11 +71,9 @@ echo "Testing Docker access..." if docker ps >/dev/null 2>&1; then echo "✅ Docker access confirmed" else - echo "⚠️ Docker access test failed - checking if running as root..." - if [[ $(id -u) -eq 0 ]]; then - echo "Running as root - Docker should work" - else - echo "Docker group membership may require logout/login to take effect" + echo "❌ Docker access failed - CI may not work properly" + echo "Trying with sudo..." + if sudo docker ps >/dev/null 2>&1; then + echo "⚠️ Docker only works with sudo" fi - echo "Continuing with setup..." fi diff --git a/scripts/minikube/install-minikube.sh b/scripts/minikube/install-minikube.sh index 903351960..b82c829b6 100755 --- a/scripts/minikube/install-minikube.sh +++ b/scripts/minikube/install-minikube.sh @@ -2,6 +2,7 @@ set -Eeou pipefail source scripts/dev/set_env_context.sh +source scripts/funcs/install # Detect architecture ARCH=$(uname -m) @@ -26,19 +27,6 @@ case "${ARCH}" in esac echo "Installing minikube on ${ARCH} architecture..." - -# Verify Docker is installed -if ! command -v docker &> /dev/null; then - echo "Error: Docker is required but not installed. Please install Docker first." - exit 1 -fi - -# Verify Docker is running -if ! docker info &> /dev/null; then - echo "Error: Docker is not running. Please start Docker service." - exit 1 -fi - # Install minikube echo "Installing minikube for ${ARCH}..." diff --git a/scripts/minikube/setup_minikube_host.sh b/scripts/minikube/setup_minikube_host.sh index bd2ac72bf..a455fca57 100755 --- a/scripts/minikube/setup_minikube_host.sh +++ b/scripts/minikube/setup_minikube_host.sh @@ -76,6 +76,36 @@ download_minikube & wait +echo "" +echo ">>> Verifying minikube installation..." +if command -v minikube &> /dev/null; then + minikube_version=$(minikube version --short 2>/dev/null || minikube version 2>/dev/null | head -n1) + echo "✅ Minikube installed successfully: ${minikube_version}" +else + echo "❌ Minikube installation failed - minikube command not found" + echo "Please check the installation logs above for errors" + exit 1 +fi + +echo "" +echo ">>> Verifying docker installation..." +if command -v docker &> /dev/null; then + docker_version=$(docker --version 2>/dev/null) + echo "✅ Docker installed successfully: ${docker_version}" + + # Check if docker service is running + if systemctl is-active --quiet docker 2>/dev/null || docker info &>/dev/null; then + echo "✅ Docker service is running" + else + echo "⚠️ Docker is installed but service may not be running" + echo "You may need to start docker service: sudo systemctl start docker" + fi +else + echo "❌ Docker installation failed - docker command not found" + echo "Please check the installation logs above for errors" + exit 1 +fi + echo "Minikube host setup completed successfully for ${ARCH}!" 
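
# The two verification blocks above share one pattern; a reusable helper
# could look like this sketch (verify_tool is a hypothetical name, not part
# of this patch):
verify_tool() {
    local name="$1" version_cmd="$2"
    if command -v "${name}" &> /dev/null; then
        echo "✅ ${name} installed successfully: $(eval "${version_cmd}" 2>/dev/null)"
    else
        echo "❌ ${name} installation failed - ${name} command not found"
        return 1
    fi
}
# e.g. verify_tool minikube "minikube version --short" && verify_tool docker "docker --version"
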
# Final status @@ -85,3 +115,5 @@ echo "✅ Setup Summary" echo "==========================================" echo "Architecture: ${ARCH}" echo "Minikube Profile: ${MINIKUBE_PROFILE:-mongodb-e2e}" +echo "Minikube: ${minikube_version}" +echo "Docker: ${docker_version}" From fd8db45fc696456e28e5a4f8011cf6be770b8548 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Thu, 31 Jul 2025 16:16:09 +0200 Subject: [PATCH 025/164] podman support --- scripts/dev/configure_docker_auth.sh | 100 ++++++++--- scripts/evergreen/setup_minikube_host.sh | 21 +-- scripts/minikube/install-docker.sh | 79 --------- scripts/minikube/install-minikube.sh | 41 ++++- scripts/minikube/minikube_host.sh | 22 +-- scripts/minikube/setup_minikube_host.sh | 212 ++++++++++++++++++----- 6 files changed, 303 insertions(+), 172 deletions(-) delete mode 100755 scripts/minikube/install-docker.sh diff --git a/scripts/dev/configure_docker_auth.sh b/scripts/dev/configure_docker_auth.sh index dfcb14f0b..74e0d9abc 100755 --- a/scripts/dev/configure_docker_auth.sh +++ b/scripts/dev/configure_docker_auth.sh @@ -8,7 +8,33 @@ source scripts/funcs/checks source scripts/funcs/printing source scripts/funcs/kubernetes +# Detect available container runtime +detect_container_runtime() { + if command -v podman &> /dev/null && podman info &> /dev/null; then + CONTAINER_RUNTIME="podman" + CONFIG_PATH="${HOME}/.config/containers/auth.json" + mkdir -p "$(dirname "${CONFIG_PATH}")" + echo "Using Podman for container authentication" + return 0 + elif command -v docker &> /dev/null; then + CONTAINER_RUNTIME="docker" + CONFIG_PATH="${HOME}/.docker/config.json" + mkdir -p "$(dirname "${CONFIG_PATH}")" + echo "Using Docker for container authentication" + return 0 + else + echo "Error: Neither Docker nor Podman is available" + exit 1 + fi +} + check_docker_daemon_is_running() { + if [[ "${CONTAINER_RUNTIME}" == "podman" ]]; then + # Podman doesn't require a daemon + echo "Using Podman (no daemon required)" + return 0 + fi + if [[ "$(uname -s)" != "Linux" ]]; then echo "Skipping docker daemon check when not running in Linux" return 0 @@ -34,71 +60,95 @@ check_docker_daemon_is_running() { remove_element() { config_option="${1}" tmpfile=$(mktemp) - jq 'del(.'"${config_option}"')' ~/.docker/config.json >"${tmpfile}" - cp "${tmpfile}" ~/.docker/config.json + + # Initialize config file if it doesn't exist + if [[ ! 
-f "${CONFIG_PATH}" ]]; then + echo '{}' > "${CONFIG_PATH}" + fi + + jq 'del(.'"${config_option}"')' "${CONFIG_PATH}" >"${tmpfile}" + cp "${tmpfile}" "${CONFIG_PATH}" rm "${tmpfile}" } -# This is the script which performs docker authentication to different registries that we use (so far ECR and RedHat) -# As the result of this login the ~/.docker/config.json will have all the 'auth' information necessary to work with docker registries +# Container runtime login wrapper +container_login() { + local username="$1" + local registry="$2" + + if [[ "${CONTAINER_RUNTIME}" == "podman" ]]; then + podman login --username "${username}" --password-stdin "${registry}" + else + docker login --username "${username}" --password-stdin "${registry}" + fi +} + +# This is the script which performs container authentication to different registries that we use (so far ECR and RedHat) +# As the result of this login the config file will have all the 'auth' information necessary to work with container registries + +# Detect container runtime and set appropriate config path +detect_container_runtime check_docker_daemon_is_running -if [[ -f ~/.docker/config.json ]]; then +if [[ -f "${CONFIG_PATH}" ]]; then if [[ "${RUNNING_IN_EVG:-"false"}" != "true" ]]; then - # Check if login is actually required by making a HEAD request to ECR using existing Docker config - echo "Checking if Docker credentials are valid..." - ecr_auth=$(jq -r '.auths."268558157000.dkr.ecr.us-east-1.amazonaws.com".auth // empty' ~/.docker/config.json) + # Check if login is actually required by making a HEAD request to ECR using existing credentials + echo "Checking if container registry credentials are valid..." + ecr_auth=$(jq -r '.auths."268558157000.dkr.ecr.us-east-1.amazonaws.com".auth // empty' "${CONFIG_PATH}") if [[ -n "${ecr_auth}" ]]; then http_status=$(curl --head -s -o /dev/null -w "%{http_code}" --max-time 3 "https://268558157000.dkr.ecr.us-east-1.amazonaws.com/v2/dev/mongodb-kubernetes/manifests/latest" \ -H "Authorization: Basic ${ecr_auth}" 2>/dev/null || echo "error/timeout") if [[ "${http_status}" != "401" && "${http_status}" != "403" && "${http_status}" != "error/timeout" ]]; then - echo "Docker credentials are up to date - not performing the new login!" + echo "Container registry credentials are up to date - not performing the new login!" 
exit fi - echo "Docker login required (HTTP status: ${http_status})" + echo "Container login required (HTTP status: ${http_status})" else - echo "No ECR credentials found in Docker config - login required" + echo "No ECR credentials found in container config - login required" fi fi - title "Performing docker login to ECR registries" + title "Performing container login to ECR registries" - # There could be some leftovers on Evergreen - if grep -q "credsStore" ~/.docker/config.json; then - remove_element "credsStore" - fi - if grep -q "credHelpers" ~/.docker/config.json; then - remove_element "credHelpers" + # There could be some leftovers on Evergreen (Docker-specific, skip for Podman) + if [[ "${CONTAINER_RUNTIME}" == "docker" ]]; then + if grep -q "credsStore" "${CONFIG_PATH}"; then + remove_element "credsStore" + fi + if grep -q "credHelpers" "${CONFIG_PATH}"; then + remove_element "credHelpers" + fi fi fi echo "$(aws --version)}" -aws ecr get-login-password --region "us-east-1" | docker login --username AWS --password-stdin 268558157000.dkr.ecr.us-east-1.amazonaws.com +aws ecr get-login-password --region "us-east-1" | container_login "AWS" "268558157000.dkr.ecr.us-east-1.amazonaws.com" # by default docker tries to store credentials in an external storage (e.g. OS keychain) - not in the config.json # We need to store it as base64 string in config.json instead so we need to remove the "credsStore" element -if grep -q "credsStore" ~/.docker/config.json; then +# This is Docker-specific behavior, Podman stores credentials directly in auth.json +if [[ "${CONTAINER_RUNTIME}" == "docker" ]] && grep -q "credsStore" "${CONFIG_PATH}"; then remove_element "credsStore" # login again to store the credentials into the config.json - aws ecr get-login-password --region "us-east-1" | docker login --username AWS --password-stdin 268558157000.dkr.ecr.us-east-1.amazonaws.com + aws ecr get-login-password --region "us-east-1" | container_login "AWS" "268558157000.dkr.ecr.us-east-1.amazonaws.com" fi -aws ecr get-login-password --region "eu-west-1" | docker login --username AWS --password-stdin 268558157000.dkr.ecr.eu-west-1.amazonaws.com +aws ecr get-login-password --region "eu-west-1" | container_login "AWS" "268558157000.dkr.ecr.eu-west-1.amazonaws.com" if [[ -n "${COMMUNITY_PRIVATE_PREVIEW_PULLSECRET_DOCKERCONFIGJSON:-}" ]]; then # log in to quay.io for the mongodb/mongodb-search-community private repo # TODO remove once we switch to the official repo in Public Preview quay_io_auth_file=$(mktemp) - docker_configjson_tmp=$(mktemp) + config_tmp=$(mktemp) echo "${COMMUNITY_PRIVATE_PREVIEW_PULLSECRET_DOCKERCONFIGJSON}" | base64 -d > "${quay_io_auth_file}" - jq -s '.[0] * .[1]' "${quay_io_auth_file}" ~/.docker/config.json > "${docker_configjson_tmp}" - mv "${docker_configjson_tmp}" ~/.docker/config.json + jq -s '.[0] * .[1]' "${quay_io_auth_file}" "${CONFIG_PATH}" > "${config_tmp}" + mv "${config_tmp}" "${CONFIG_PATH}" rm "${quay_io_auth_file}" fi diff --git a/scripts/evergreen/setup_minikube_host.sh b/scripts/evergreen/setup_minikube_host.sh index ffc571d3b..9d4884530 100755 --- a/scripts/evergreen/setup_minikube_host.sh +++ b/scripts/evergreen/setup_minikube_host.sh @@ -46,22 +46,14 @@ run_setup_step "kubectl and helm Setup" "scripts/evergreen/setup_kubectl.sh" run_setup_step "jq Setup" "scripts/evergreen/setup_jq.sh" -run_setup_step "IBM Host Setup" "scripts/minikube/setup_minikube_host.sh" +run_setup_step "Minikube Host Setup with Container Runtime Detection" "scripts/minikube/setup_minikube_host.sh" 
-run_setup_step "Docker Authentication" "scripts/dev/configure_docker_auth.sh"
+run_setup_step "Container Registry Authentication" "scripts/dev/configure_docker_auth.sh"
 
-# Setup Kubernetes cluster after Docker is properly configured
+# The minikube cluster is already started by the setup_minikube_host.sh script
 echo ""
-echo ">>> Setting up Kubernetes cluster"
-echo ">>> Command: minikube start --profile=${MINIKUBE_PROFILE:-mongodb-e2e} --driver=docker --memory=8192mb --cpus=4"
-
-# Start minikube cluster for CI
-if minikube start --profile="${MINIKUBE_PROFILE:-mongodb-e2e}" --driver=docker --memory=8192mb --cpus=4; then
-    echo "✅ Minikube Kubernetes Cluster completed successfully"
-else
-    echo "❌ Minikube Kubernetes Cluster failed"
-    exit 1
-fi
+echo ">>> Minikube cluster startup completed by setup_minikube_host.sh"
+echo "✅ Minikube cluster is ready for use"
 
 echo ""
 echo "=========================================="
@@ -74,6 +66,7 @@ echo "- AWS CLI: $(aws --version 2>/dev/null || echo 'Not found')"
 echo "- kubectl: $(kubectl version --client 2>/dev/null || echo 'Not found')"
 echo "- helm: $(helm version --short 2>/dev/null || echo 'Not found')"
 echo "- jq: $(jq --version 2>/dev/null || echo 'Not found')"
-echo "- Docker: $(docker --version 2>/dev/null || echo 'Not found')"
+echo "- Container Runtime: $(if command -v podman &>/dev/null; then echo "Podman $(podman --version 2>/dev/null)"; elif command -v docker &>/dev/null; then echo "Docker $(docker --version 2>/dev/null)"; else echo "Not found"; fi)"
+echo "- Minikube: $(./bin/minikube version --short 2>/dev/null || echo 'Not found')"
 echo ""
 echo "Setup complete! Host is ready for minikube operations."
diff --git a/scripts/minikube/install-docker.sh b/scripts/minikube/install-docker.sh
deleted file mode 100755
index 770c75e5a..000000000
--- a/scripts/minikube/install-docker.sh
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env bash
-set -Eeou pipefail
-
-source scripts/dev/set_env_context.sh
-
-echo "Installing Docker"
-
-# Detect OS
-if [[ -f /etc/redhat-release ]]; then
-    OS_TYPE="rhel"
-elif [[ -f /etc/debian_version ]]; then
-    OS_TYPE="debian"
-else
-    echo "Unsupported OS. This script supports RHEL/CentOS and Ubuntu/Debian."
-    exit 1
-fi
-
-# Install Docker based on OS
-if [[ "$OS_TYPE" == "rhel" ]]; then
-    echo "Detected RHEL/CentOS system..."
-
-    # Add Docker repository
-    sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
-
-    # Install Docker CE
-    sudo yum install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
-
-elif [[ "$OS_TYPE" == "debian" ]]; then
-    echo "Detected Ubuntu/Debian system..." 
- - # Update package index - sudo apt-get update - - # Install required packages - sudo apt-get install -y apt-transport-https ca-certificates curl gnupg lsb-release - - # Add Docker's official GPG key - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg - - # Set up stable repository - echo "deb [arch=s390x signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null - - # Update package index again - sudo apt-get update - - # Install Docker CE - sudo apt-get install -y docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -fi - -# Start and enable Docker service -sudo systemctl start docker -sudo systemctl enable docker - -# Add current CI user to docker group and change socket permissions -current_user=$(whoami) -if [[ "$current_user" != "root" ]]; then - echo "Adding CI user '$current_user' to docker group..." - sudo usermod -aG docker "$current_user" - - # For CI: Change docker socket permissions to allow immediate access - echo "Setting docker socket permissions for CI..." - sudo chmod 666 /var/run/docker.sock -fi - -# Verify installation -echo "Verifying Docker installation..." -sudo docker --version - -# Test docker access -echo "Testing Docker access..." -if docker ps >/dev/null 2>&1; then - echo "✅ Docker access confirmed" -else - echo "❌ Docker access failed - CI may not work properly" - echo "Trying with sudo..." - if sudo docker ps >/dev/null 2>&1; then - echo "⚠️ Docker only works with sudo" - fi -fi diff --git a/scripts/minikube/install-minikube.sh b/scripts/minikube/install-minikube.sh index b82c829b6..527b27543 100755 --- a/scripts/minikube/install-minikube.sh +++ b/scripts/minikube/install-minikube.sh @@ -27,12 +27,49 @@ case "${ARCH}" in esac echo "Installing minikube on ${ARCH} architecture..." + +# Install crictl (container runtime CLI) +echo "Installing crictl for ${ARCH}..." +CRICTL_VERSION=$(curl -s https://api.github.com/repos/kubernetes-sigs/cri-tools/releases/latest | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/') + +# Download and extract crictl tar.gz +mkdir -p "${PROJECT_DIR:-.}/bin" +CRICTL_URL="https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-${MINIKUBE_ARCH}.tar.gz" +echo "Downloading ${CRICTL_URL}" +TEMP_DIR=$(mktemp -d) +curl --retry 3 --silent -L "${CRICTL_URL}" -o "${TEMP_DIR}/crictl.tar.gz" +tar -xzf "${TEMP_DIR}/crictl.tar.gz" -C "${TEMP_DIR}/" +chmod +x "${TEMP_DIR}/crictl" +mv "${TEMP_DIR}/crictl" "${PROJECT_DIR:-.}/bin/crictl" +rm -rf "${TEMP_DIR}" +echo "Installed crictl to ${PROJECT_DIR:-.}/bin" + +# Also install crictl system-wide so minikube can find it +echo "Installing crictl system-wide..." 
+if [[ -f "${PROJECT_DIR:-.}/bin/crictl" ]]; then + # Install to both /usr/local/bin and /usr/bin for better PATH coverage + sudo cp "${PROJECT_DIR:-.}/bin/crictl" /usr/local/bin/crictl + sudo cp "${PROJECT_DIR:-.}/bin/crictl" /usr/bin/crictl + sudo chmod +x /usr/local/bin/crictl + sudo chmod +x /usr/bin/crictl + echo "✅ crictl installed to /usr/local/bin/ and /usr/bin/" + + # Verify installation + if command -v crictl >/dev/null 2>&1; then + echo "✅ crictl is now available in PATH: $(which crictl)" + echo "✅ crictl version: $(crictl --version 2>/dev/null || echo 'version check failed')" + else + echo "⚠️ crictl installed but not found in PATH" + fi +else + echo "⚠️ crictl not found in project bin, minikube may have issues" +fi + # Install minikube echo "Installing minikube for ${ARCH}..." - MINIKUBE_VERSION=$(curl -s https://api.github.com/repos/kubernetes/minikube/releases/latest | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/') # Download minikube for detected architecture download_and_install_binary "${PROJECT_DIR:-.}/bin" minikube "https://github.com/kubernetes/minikube/releases/download/${MINIKUBE_VERSION}/minikube-linux-${MINIKUBE_ARCH}" -echo "Minikube ${MINIKUBE_VERSION} installed successfully for ${ARCH}" +echo "Crictl ${CRICTL_VERSION} and Minikube ${MINIKUBE_VERSION} installed successfully for ${ARCH}" diff --git a/scripts/minikube/minikube_host.sh b/scripts/minikube/minikube_host.sh index c383ac09c..09c5b80f7 100755 --- a/scripts/minikube/minikube_host.sh +++ b/scripts/minikube/minikube_host.sh @@ -79,19 +79,18 @@ remote-prepare-local-e2e-run() { get-kubeconfig() { # For minikube, we need to get the kubeconfig and certificates echo "Getting kubeconfig from minikube on s390x host..." - local profile=${MINIKUBE_PROFILE:-mongodb-e2e} # Create local minikube directory structure - mkdir -p "${HOME}/.minikube/profiles/${profile}" + mkdir -p "${HOME}/.minikube" # Copy certificates from remote host echo "Copying minikube certificates..." scp "${host_url}:~/.minikube/ca.crt" "${HOME}/.minikube/" - scp "${host_url}:~/.minikube/profiles/${profile}/client.crt" "${HOME}/.minikube/profiles/${profile}/" - scp "${host_url}:~/.minikube/profiles/${profile}/client.key" "${HOME}/.minikube/profiles/${profile}/" + scp "${host_url}:~/.minikube/client.crt" "${HOME}/.minikube/" + scp "${host_url}:~/.minikube/client.key" "${HOME}/.minikube/" # Get kubeconfig and update paths to local ones - ssh -T -q "${host_url}" "cd ~/mongodb-kubernetes; export KUBE_ENVIRONMENT_NAME=minikube; export MINIKUBE_PROFILE=${profile}; kubectl config view --raw" > "${kubeconfig_path}" + ssh -T -q "${host_url}" "cd ~/mongodb-kubernetes; export KUBE_ENVIRONMENT_NAME=minikube; kubectl config view --raw" > "${kubeconfig_path}" # Update certificate paths to local paths sed -i '' "s|/home/cloud-user/.minikube|${HOME}/.minikube|g" "${kubeconfig_path}" @@ -103,12 +102,10 @@ get-kubeconfig() { } recreate-minikube-cluster() { - shift 1 - profile_name=${1:-mongodb-e2e} configure "$(uname -m)" 2>&1| prepend "minikube_host.sh configure" - echo "Recreating minikube cluster ${profile_name} on ${S390_HOST_NAME} (${host_url})..." + echo "Recreating minikube cluster on ${S390_HOST_NAME} (${host_url})..." 
# shellcheck disable=SC2088 - ssh -T "${host_url}" "cd ~/mongodb-kubernetes; export KUBE_ENVIRONMENT_NAME=minikube; export MINIKUBE_PROFILE=${profile_name}; minikube delete --profile=${profile_name} || true; minikube start --profile=${profile_name} --driver=docker --memory=8192mb --cpus=4" + ssh -T "${host_url}" "cd ~/mongodb-kubernetes; export KUBE_ENVIRONMENT_NAME=minikube; minikube delete || true; minikube start --driver=podman --memory=8192mb --cpus=4" echo "Copying kubeconfig to ${kubeconfig_path}" get-kubeconfig } @@ -116,11 +113,10 @@ recreate-minikube-cluster() { tunnel() { shift 1 echo "Setting up tunnel for minikube cluster..." - local profile=${MINIKUBE_PROFILE:-mongodb-e2e} # Get the minikube API server port from remote host local api_port - api_port=$(ssh -T -q "${host_url}" "cd ~/mongodb-kubernetes; export MINIKUBE_PROFILE=${profile}; minikube ip --profile=${profile} 2>/dev/null && echo ':8443' | tr -d '\n'") + api_port=$(ssh -T -q "${host_url}" "cd ~/mongodb-kubernetes; minikube ip 2>/dev/null && echo ':8443' | tr -d '\n'") if [[ -z "${api_port}" ]]; then echo "Could not determine minikube API server details. Is the cluster running?" @@ -134,7 +130,7 @@ tunnel() { # Forward the API server port through minikube set -x # shellcheck disable=SC2029 - ssh -L "${port}:$(ssh -T -q "${host_url}" "export MINIKUBE_PROFILE=${profile}; minikube ip --profile=${profile}"):${port}" "${host_url}" "$@" + ssh -L "${port}:$(ssh -T -q "${host_url}" "minikube ip"):${port}" "${host_url}" "$@" set +x } @@ -185,7 +181,7 @@ PREREQUISITES: COMMANDS: configure installs on a host: calls sync, switches context, installs necessary software (auto-detects arch) sync rsync of project directory - recreate-minikube-cluster recreates minikube cluster with specific profile and executes get-kubeconfig + recreate-minikube-cluster recreates minikube cluster and executes get-kubeconfig remote-prepare-local-e2e-run executes prepare-local-e2e on the remote host get-kubeconfig copies remote minikube kubeconfig locally to ~/.operator-dev/s390-host.kubeconfig tunnel [args] creates ssh session with tunneling to all API servers diff --git a/scripts/minikube/setup_minikube_host.sh b/scripts/minikube/setup_minikube_host.sh index a455fca57..8726e8e3f 100755 --- a/scripts/minikube/setup_minikube_host.sh +++ b/scripts/minikube/setup_minikube_host.sh @@ -5,23 +5,6 @@ source scripts/dev/set_env_context.sh set -Eeou pipefail -check_disk_space() { - echo "Checking available disk space..." - local available_gb - available_gb=$(df / | awk 'NR==2 {print int($4/1024/1024)}') - - if [[ $available_gb -lt 5 ]]; then - echo "ERROR: Insufficient disk space. Available: ${available_gb}GB, Required: 5GB minimum" - echo "Please clean up disk space before continuing:" - echo " sudo dnf clean all" - echo " sudo rm -rf /var/cache/dnf/* /tmp/* /var/tmp/*" - echo " docker system prune -af" - return 1 - fi - - echo "Disk space check passed: ${available_gb}GB available" -} - set_limits() { echo "Increasing fs.inotify.max_user_instances" sudo sysctl -w fs.inotify.max_user_instances=8192 @@ -64,17 +47,125 @@ download_minikube() { scripts/minikube/install-minikube.sh } -download_docker() { - echo "Installing Docker for ${ARCH}..." - scripts/minikube/install-docker.sh +# Fix crictl for ppc64le kicbase images +fix_crictl_ppc64le() { + if [[ "${ARCH}" == "ppc64le" ]]; then + echo ">>> Applying ppc64le crictl fix for podman driver..." + + # Ensure crictl is available on host + if ! 
command -v crictl &> /dev/null; then + echo "❌ crictl not found on host - this is required for ppc64le fix" + return 1 + fi + + # Wait for minikube container to be created + local container_name="minikube" + local max_wait=60 + local waited=0 + + echo "Waiting for minikube container '${container_name}' to be ready..." + while ! podman ps --format '{{.Names}}' | grep -q "^${container_name}$" && [[ $waited -lt $max_wait ]]; do + sleep 2 + waited=$((waited + 2)) + done + + if [[ $waited -ge $max_wait ]]; then + echo "⚠️ Timeout waiting for minikube container - crictl fix may be needed manually" + return 1 + fi + + # Copy crictl from host to container using podman + echo "Copying crictl to minikube container using podman..." + if podman cp "$(which crictl)" "${container_name}:/usr/bin/crictl"; then + podman exec "${container_name}" chmod +x /usr/bin/crictl + echo "✅ crictl fix applied successfully with podman" + + # Verify the fix + if podman exec "${container_name}" crictl --version &>/dev/null; then + echo "✅ crictl is now working in minikube container" + return 0 + else + echo "⚠️ crictl copy succeeded but binary may not be working" + return 1 + fi + else + echo "❌ Failed to copy crictl to container via podman" + return 1 + fi + fi + return 0 } -check_disk_space -set_limits -download_docker & -download_minikube & +# Start minikube with podman driver +start_minikube_cluster() { + echo ">>> Starting minikube cluster with podman driver..." + + local start_args=("--driver=podman") + + # Add Calico CNI for better compatibility + start_args+=("--cni=calico") + + # For ppc64le, we need to handle the crictl fix + if [[ "${ARCH}" == "ppc64le" ]]; then + echo "Starting minikube (ppc64le requires crictl fix for podman)..." + + # Start minikube in background to let container initialize + timeout 120 "${PROJECT_DIR:-.}/bin/minikube" start "${start_args[@]}" & + local minikube_pid=$! -wait + # Wait a bit for container to be created + sleep 15 + + # Apply crictl fix while minikube is starting + if fix_crictl_ppc64le; then + echo "✅ crictl fix applied, waiting for minikube to complete..." + else + echo "⚠️ crictl fix failed, but continuing..." + fi + + # Wait for minikube to finish + if wait $minikube_pid; then + echo "✅ Minikube started successfully with crictl fix" + else + echo "❌ Minikube failed to start" + return 1 + fi + else + # Standard minikube start + if "${PROJECT_DIR:-.}/bin/minikube" start "${start_args[@]}"; then + echo "✅ Minikube started successfully" + else + echo "❌ Minikube failed to start" + return 1 + fi + fi +} + +setup_podman() { + echo "Setting up podman for ${ARCH}..." + + # Check if podman is already available + if command -v podman &> /dev/null; then + echo "✅ Podman already installed" + else + echo "Installing podman..." + sudo dnf install -y podman + fi + + # Configure podman + echo "Configuring Podman for ${ARCH}..." + + # Start podman service if not running + systemctl --user enable podman.socket 2>/dev/null || true + systemctl --user start podman.socket 2>/dev/null || true + + echo "✅ Podman configured successfully" +} + +# Setup podman and container runtime +setup_podman +set_limits +download_minikube echo "" echo ">>> Verifying minikube installation..." @@ -88,24 +179,61 @@ else fi echo "" -echo ">>> Verifying docker installation..." 
-if command -v docker &> /dev/null; then - docker_version=$(docker --version 2>/dev/null) - echo "✅ Docker installed successfully: ${docker_version}" - - # Check if docker service is running - if systemctl is-active --quiet docker 2>/dev/null || docker info &>/dev/null; then - echo "✅ Docker service is running" +echo ">>> Verifying podman installation..." +if command -v podman &> /dev/null; then + podman_version=$(podman --version 2>/dev/null) + echo "✅ Podman installed successfully: ${podman_version}" + + # Check podman info + if podman info &>/dev/null; then + echo "✅ Podman is working correctly" else - echo "⚠️ Docker is installed but service may not be running" - echo "You may need to start docker service: sudo systemctl start docker" + echo "⚠️ Podman may need additional configuration" fi else - echo "❌ Docker installation failed - docker command not found" - echo "Please check the installation logs above for errors" + echo "❌ Podman installation failed - podman command not found" exit 1 fi +echo "" +echo ">>> Verifying crictl is available in PATH..." +if command -v crictl &> /dev/null; then + crictl_version=$(crictl --version 2>/dev/null || echo "crictl available") + crictl_path=$(which crictl 2>/dev/null || echo "unknown path") + echo "✅ crictl found in PATH: ${crictl_version}" + echo "✅ crictl location: ${crictl_path}" +else + echo "❌ crictl not found in PATH - this may cause minikube issues" + echo "Checking if crictl exists in project bin..." + if [[ -f "${PROJECT_DIR:-.}/bin/crictl" ]]; then + echo "Found crictl in project bin, installing to system directories..." + sudo cp "${PROJECT_DIR:-.}/bin/crictl" /usr/local/bin/crictl + sudo cp "${PROJECT_DIR:-.}/bin/crictl" /usr/bin/crictl + sudo chmod +x /usr/local/bin/crictl + sudo chmod +x /usr/bin/crictl + echo "✅ crictl installed to /usr/local/bin/ and /usr/bin/" + + # Force PATH refresh and verify + hash -r 2>/dev/null || true + if command -v crictl &> /dev/null; then + echo "✅ crictl now available: $(which crictl)" + else + echo "⚠️ crictl installation may not be working properly" + fi + else + echo "❌ crictl not found in project bin either" + fi +fi + +# Start the minikube cluster +start_minikube_cluster + +# Update kubectl context to point to the running cluster +echo "" +echo ">>> Updating kubectl context for minikube cluster..." +"${PROJECT_DIR:-.}/bin/minikube" update-context +echo "✅ Kubectl context updated successfully" + echo "Minikube host setup completed successfully for ${ARCH}!" 
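
# A quick smoke test that could run at this point, assuming the kubeconfig
# context was just set by "minikube update-context" (a sketch, not part of
# this patch):
if kubectl get nodes >/dev/null 2>&1 && \
   kubectl wait --for=condition=Ready nodes --all --timeout=120s >/dev/null 2>&1; then
    echo "✅ Cluster is reachable and all nodes are Ready"
else
    echo "⚠️ Cluster not ready - inspect with: minikube logs"
fi
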
# Final status
@@ -114,6 +242,12 @@
 echo "=========================================="
 echo "✅ Setup Summary"
 echo "=========================================="
 echo "Architecture: ${ARCH}"
-echo "Minikube Profile: ${MINIKUBE_PROFILE:-mongodb-e2e}"
+echo "Container Runtime: podman"
+echo "Minikube Driver: podman"
+echo "Minikube: Default cluster"
 echo "Minikube: ${minikube_version}"
-echo "Docker: ${docker_version}"
+echo "Podman: ${podman_version}"
+echo "CNI: Calico"
+if [[ "${ARCH}" == "ppc64le" ]]; then
+    echo "Special Config: ppc64le crictl fix applied for podman"
+fi

From 284c7e19beed1ab20b1e2ebd619b0efeef6aa25f Mon Sep 17 00:00:00 2001
From: Nam Nguyen <nam.nguyen@mongodb.com>
Date: Thu, 31 Jul 2025 16:21:51 +0200
Subject: [PATCH 026/164] z series support

---
 .evergreen.yml                          | 14 +++++++++++++-
 scripts/minikube/setup_minikube_host.sh |  4 ++--
 2 files changed, 15 insertions(+), 3 deletions(-)

diff --git a/.evergreen.yml b/.evergreen.yml
index 98eaf8a75..fdcb4eaae 100644
--- a/.evergreen.yml
+++ b/.evergreen.yml
@@ -1473,7 +1473,7 @@ buildvariants:
     tasks:
       - name: e2e_smoke_task_group

-  - name: e2e_smoke_ibm
+  - name: e2e_smoke_ibm_power
    display_name: e2e_smoke_ibm
    tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ]
    run_on:
@@ -1481,6 +1481,18 @@
    allowed_requesters: [ "patch", "github_tag" ]
#  depends_on:
#    - name: build_test_image
+#      variant: init_test_run
+    tasks:
+      - name: e2e_smoke_ibm_task_group
+
+  - name: e2e_smoke_ibm_z
+    display_name: e2e_smoke_ibm_z
+    tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ]
+    run_on:
+      - rhel9-zseries-large
+    allowed_requesters: [ "patch", "github_tag" ]
+#  depends_on:
+#    - name: build_test_image
#      variant: init_test_run
    tasks:
      - name: e2e_smoke_ibm_task_group
diff --git a/scripts/minikube/setup_minikube_host.sh b/scripts/minikube/setup_minikube_host.sh
index 8726e8e3f..cf38652dc 100755
--- a/scripts/minikube/setup_minikube_host.sh
+++ b/scripts/minikube/setup_minikube_host.sh
@@ -102,8 +102,8 @@ start_minikube_cluster() {

     local start_args=("--driver=podman")

-    # Add Calico CNI for better compatibility
-    start_args+=("--cni=calico")
+    # Use default bridge CNI to avoid Docker Hub rate limiting issues
+    # start_args+=("--cni=bridge")

From 00439c0e67f64d59e62da3a7a34a8977ef8c7435 Mon Sep 17 00:00:00 2001
From: Nam Nguyen <nam.nguyen@mongodb.com>
Date: Thu, 31 Jul 2025 18:32:27 +0200
Subject: [PATCH 027/164] custom container with working power crictl

---
 scripts/minikube/setup_minikube_host.sh | 246 ++++++++++++++----------
 1 file changed, 141 insertions(+), 105 deletions(-)

diff --git a/scripts/minikube/setup_minikube_host.sh b/scripts/minikube/setup_minikube_host.sh
index cf38652dc..f4af288c0 100755
--- a/scripts/minikube/setup_minikube_host.sh
+++ b/scripts/minikube/setup_minikube_host.sh
@@ -47,51 +47,97 @@ download_minikube() {
     scripts/minikube/install-minikube.sh
 }
 
-# Fix crictl for ppc64le kicbase images
-fix_crictl_ppc64le() {
+# Setup local registry and build custom kicbase image for ppc64le with crictl
+setup_local_registry_and_custom_image() {
     if [[ "${ARCH}" == "ppc64le" ]]; then
-        echo ">>> Applying ppc64le crictl fix for podman driver..."
-
-        # Ensure crictl is available on host
-        if ! 
podman ps --filter "name=registry" --format "{{.Names}}" | grep -q "^registry$"; then + echo "Starting local container registry on port 5000..." + podman run -d -p 5000:5000 --name registry --restart=always docker.io/library/registry:2 || { + echo "Registry might already exist, trying to start it..." + podman start registry || { + echo "Removing existing registry and creating new one..." + podman rm -f registry 2>/dev/null || true + podman run -d -p 5000:5000 --name registry --restart=always docker.io/library/registry:2 + } + } + + # Wait for registry to be ready + echo "Waiting for registry to be ready..." + for i in {1..30}; do + if curl -s http://localhost:5000/v2/_catalog >/dev/null 2>&1; then + break + fi + sleep 1 + done + else + echo "✅ Local registry already running" fi - - # Wait for minikube container to be created - local container_name="minikube" - local max_wait=60 - local waited=0 - - echo "Waiting for minikube container '${container_name}' to be ready..." - while ! podman ps --format '{{.Names}}' | grep -q "^${container_name}$" && [[ $waited -lt $max_wait ]]; do - sleep 2 - waited=$((waited + 2)) - done - - if [[ $waited -ge $max_wait ]]; then - echo "⚠️ Timeout waiting for minikube container - crictl fix may be needed manually" - return 1 + + # Configure per-user podman to trust local registry (no system-wide changes) + echo "Configuring per-user podman registry settings..." + mkdir -p ~/.config/containers + + # Create user-specific registries.conf that includes insecure local registry + cat > ~/.config/containers/registries.conf << 'EOF' +unqualified-search-registries = ["registry.access.redhat.com", "registry.redhat.io", "docker.io"] + +[[registry]] +location = "localhost:5000" +insecure = true + +short-name-mode = "permissive" +EOF + + # Check if custom image already exists in local registry + if curl -s http://localhost:5000/v2/kicbase/tags/list | grep -q "v0.0.47"; then + echo "✅ Custom kicbase image already exists in local registry" + return 0 fi - - # Copy crictl from host to container using podman - echo "Copying crictl to minikube container using podman..." - if podman cp "$(which crictl)" "${container_name}:/usr/bin/crictl"; then - podman exec "${container_name}" chmod +x /usr/bin/crictl - echo "✅ crictl fix applied successfully with podman" - - # Verify the fix - if podman exec "${container_name}" crictl --version &>/dev/null; then - echo "✅ crictl is now working in minikube container" - return 0 - else - echo "⚠️ crictl copy succeeded but binary may not be working" - return 1 - fi - else - echo "❌ Failed to copy crictl to container via podman" - return 1 + + # Build custom kicbase image with crictl + echo "Building custom kicbase image with crictl for ppc64le..." + + # Create build directory if it doesn't exist + mkdir -p "${PROJECT_DIR:-.}/scripts/minikube/kicbase" + + # Create Dockerfile for custom kicbase + cat > "${PROJECT_DIR:-.}/scripts/minikube/kicbase/Dockerfile" << 'EOF' +FROM gcr.io/k8s-minikube/kicbase:v0.0.47 + +# Install crictl for ppc64le if needed +RUN if [ "$(uname -m)" = "ppc64le" ]; then \ + echo "Installing crictl for ppc64le architecture..." 
&& \ + CRICTL_VERSION="v1.28.0" && \ + curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-ppc64le.tar.gz" \ + -o /tmp/crictl.tar.gz && \ + tar -C /usr/bin -xzf /tmp/crictl.tar.gz && \ + chmod +x /usr/bin/crictl && \ + rm /tmp/crictl.tar.gz && \ + echo "crictl installed successfully" && \ + crictl --version; \ + else \ + echo "Not ppc64le architecture, skipping crictl installation"; \ fi + +# Verify crictl is available +RUN command -v crictl >/dev/null 2>&1 && echo "crictl is available" || echo "crictl not found" +EOF + + # Build and push to local registry + echo "Building custom kicbase image..." + cd "${PROJECT_DIR:-.}/scripts/minikube/kicbase" + + podman build -t localhost:5000/kicbase:v0.0.47 . + + echo "Pushing custom image to local registry..." + podman push localhost:5000/kicbase:v0.0.47 --tls-verify=false + + cd - > /dev/null + + echo "✅ Custom kicbase image with crictl ready in local registry" fi return 0 } @@ -100,44 +146,36 @@ fix_crictl_ppc64le() { start_minikube_cluster() { echo ">>> Starting minikube cluster with podman driver..." - local start_args=("--driver=podman") + # Clean up any existing minikube state to avoid cached configuration issues + echo "Cleaning up any existing minikube state..." + if [[ -d ~/.minikube/machines/minikube ]]; then + echo "Removing ~/.minikube/machines/minikube directory..." + rm -rf ~/.minikube/machines/minikube + fi + + # Delete any existing minikube cluster to start fresh + echo "Ensuring clean minikube state..." + "${PROJECT_DIR:-.}/bin/minikube" delete 2>/dev/null || true - # Use default bridge CNI to avoid Docker Hub rate limiting issues - # start_args+=("--cni=bridge") + local start_args=("--driver=podman") - # For ppc64le, we need to handle the crictl fix + # Use custom kicbase image for ppc64le with crictl included if [[ "${ARCH}" == "ppc64le" ]]; then - echo "Starting minikube (ppc64le requires crictl fix for podman)..." - - # Start minikube in background to let container initialize - timeout 120 "${PROJECT_DIR:-.}/bin/minikube" start "${start_args[@]}" & - local minikube_pid=$! - - # Wait a bit for container to be created - sleep 15 + echo "Using custom kicbase image for ppc64le with crictl..." + start_args+=("--base-image=localhost:5000/kicbase:v0.0.47") + fi - # Apply crictl fix while minikube is starting - if fix_crictl_ppc64le; then - echo "✅ crictl fix applied, waiting for minikube to complete..." - else - echo "⚠️ crictl fix failed, but continuing..." - fi + # Use default bridge CNI to avoid Docker Hub rate limiting issues + # start_args+=("--cni=bridge") - # Wait for minikube to finish - if wait $minikube_pid; then - echo "✅ Minikube started successfully with crictl fix" - else - echo "❌ Minikube failed to start" - return 1 - fi + echo "Starting minikube with args: ${start_args[*]}" + if "${PROJECT_DIR:-.}/bin/minikube" start "${start_args[@]}"; then + echo "✅ Minikube started successfully" else - # Standard minikube start - if "${PROJECT_DIR:-.}/bin/minikube" start "${start_args[@]}"; then - echo "✅ Minikube started successfully" - else - echo "❌ Minikube failed to start" - return 1 - fi + echo "❌ Minikube failed to start" + echo "Minikube logs:" + "${PROJECT_DIR:-.}/bin/minikube" logs | tail -20 + return 1 fi } @@ -152,14 +190,31 @@ setup_podman() { sudo dnf install -y podman fi - # Configure podman - echo "Configuring Podman for ${ARCH}..." + # Configure podman for CI environment + echo "Configuring Podman for ${ARCH} CI environment..." 
+ + # Enable lingering for the current user to fix systemd session issues + current_user=$(whoami) + current_uid=$(id -u) + echo "Enabling systemd lingering for user ${current_user} (UID: ${current_uid})" + sudo loginctl enable-linger "${current_uid}" 2>/dev/null || true + + # Configure podman to use cgroupfs instead of systemd in CI + mkdir -p ~/.config/containers + cat > ~/.config/containers/containers.conf << EOF +[containers] +cgroup_manager = "cgroupfs" +events_logger = "file" + +[engine] +cgroup_manager = "cgroupfs" +EOF # Start podman service if not running systemctl --user enable podman.socket 2>/dev/null || true systemctl --user start podman.socket 2>/dev/null || true - echo "✅ Podman configured successfully" + echo "✅ Podman configured successfully for CI" } # Setup podman and container runtime @@ -167,6 +222,9 @@ setup_podman set_limits download_minikube +# Setup local registry and custom kicbase image for ppc64le if needed +setup_local_registry_and_custom_image + echo "" echo ">>> Verifying minikube installation..." if command -v minikube &> /dev/null; then @@ -195,34 +253,12 @@ else exit 1 fi -echo "" -echo ">>> Verifying crictl is available in PATH..." -if command -v crictl &> /dev/null; then - crictl_version=$(crictl --version 2>/dev/null || echo "crictl available") - crictl_path=$(which crictl 2>/dev/null || echo "unknown path") - echo "✅ crictl found in PATH: ${crictl_version}" - echo "✅ crictl location: ${crictl_path}" +if [[ "${ARCH}" == "ppc64le" ]]; then + echo "" + echo ">>> Note: crictl will be patched into the minikube container after startup" else - echo "❌ crictl not found in PATH - this may cause minikube issues" - echo "Checking if crictl exists in project bin..." - if [[ -f "${PROJECT_DIR:-.}/bin/crictl" ]]; then - echo "Found crictl in project bin, installing to system directories..." 
-        sudo cp "${PROJECT_DIR:-.}/bin/crictl" /usr/local/bin/crictl
-        sudo cp "${PROJECT_DIR:-.}/bin/crictl" /usr/bin/crictl
-        sudo chmod +x /usr/local/bin/crictl
-        sudo chmod +x /usr/bin/crictl
-        echo "✅ crictl installed to /usr/local/bin/ and /usr/bin/"
-
-        # Force PATH refresh and verify
-        hash -r 2>/dev/null || true
-        if command -v crictl &> /dev/null; then
-            echo "✅ crictl now available: $(which crictl)"
-        else
-            echo "⚠️ crictl installation may not be working properly"
-        fi
-    else
-        echo "❌ crictl not found in project bin either"
-    fi
+    echo ""
+    echo ">>> Using standard kicbase image (crictl included for x86_64/aarch64/s390x)"
 fi

 # Start the minikube cluster
@@ -247,7 +283,7 @@ echo "Minikube Driver: podman"
 echo "Minikube: Default cluster"
 echo "Minikube: ${minikube_version}"
 echo "Podman: ${podman_version}"
-echo "CNI: Calico"
+echo "CNI: bridge (default)"
 if [[ "${ARCH}" == "ppc64le" ]]; then
-    echo "Special Config: ppc64le crictl fix applied for podman"
+    echo "Special Config: Custom kicbase image with crictl via local registry"
 fi

From c675c83bda19fa5431c407ce06a3750e487f6c8e Mon Sep 17 00:00:00 2001
From: Nam Nguyen
Date: Thu, 31 Jul 2025 18:33:59 +0200
Subject: [PATCH 028/164] custom container with working power crictl

---
 .evergreen.yml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.evergreen.yml b/.evergreen.yml
index fdcb4eaae..fd3dc78b0 100644
--- a/.evergreen.yml
+++ b/.evergreen.yml
@@ -1474,7 +1474,7 @@ buildvariants:
       - name: e2e_smoke_task_group

   - name: e2e_smoke_ibm_power
-    display_name: e2e_smoke_ibm
+    display_name: e2e_smoke_ibm_power
     tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ]
     run_on:
       - rhel9-power-large
@@ -1486,7 +1486,7 @@
       - name: e2e_smoke_ibm_task_group

   - name: e2e_smoke_ibm_z
-    display_name: e2e_smoke_ibm
+    display_name: e2e_smoke_ibm_z
     tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ]
     run_on:
       - rhel9-zseries-large

From 37c31de408372163ffb6fefdf8e53a9ab7d05014 Mon Sep 17 00:00:00 2001
From: Nam Nguyen
Date: Thu, 31 Jul 2025 21:40:47 +0200
Subject: [PATCH 029/164] add insecure

---
 scripts/minikube/setup_minikube_host.sh | 44 ++++++++++++++-----------
 1 file changed, 25 insertions(+), 19 deletions(-)

diff --git a/scripts/minikube/setup_minikube_host.sh b/scripts/minikube/setup_minikube_host.sh
index f4af288c0..88afd424f 100755
--- a/scripts/minikube/setup_minikube_host.sh
+++ b/scripts/minikube/setup_minikube_host.sh
@@ -51,7 +51,7 @@ setup_local_registry_and_custom_image() {
     if [[ "${ARCH}" == "ppc64le" ]]; then
         echo ">>> Setting up local registry and custom kicbase image for ppc64le..."
-
+
         # Check if local registry is running
         if ! podman ps --filter "name=registry" --format "{{.Names}}" | grep -q "^registry$"; then
             echo "Starting local container registry on port 5000..."
@@ -63,7 +63,7 @@ setup_local_registry_and_custom_image() {
                 podman run -d -p 5000:5000 --name registry --restart=always docker.io/library/registry:2
             }
         }
-
+
         # Wait for registry to be ready
         echo "Waiting for registry to be ready..."
         for i in {1..30}; do
@@ -75,13 +75,18 @@ setup_local_registry_and_custom_image() {
     else
         echo "✅ Local registry already running"
     fi
-
-    # Configure per-user podman to trust local registry (no system-wide changes)
-    echo "Configuring per-user podman registry settings..."
- mkdir -p ~/.config/containers - - # Create user-specific registries.conf that includes insecure local registry - cat > ~/.config/containers/registries.conf << 'EOF' + + # Configure system-wide podman to trust local registry (with backup) + echo "Configuring system registries.conf to trust local registry..." + + # Backup existing registries.conf if it exists + if [[ -f /etc/containers/registries.conf ]]; then + echo "Backing up existing registries.conf..." + sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.minikube-backup + fi + + # Create a clean registries.conf that includes insecure local registry + sudo tee /etc/containers/registries.conf << 'EOF' unqualified-search-registries = ["registry.access.redhat.com", "registry.redhat.io", "docker.io"] [[registry]] @@ -90,19 +95,19 @@ insecure = true short-name-mode = "permissive" EOF - + # Check if custom image already exists in local registry if curl -s http://localhost:5000/v2/kicbase/tags/list | grep -q "v0.0.47"; then echo "✅ Custom kicbase image already exists in local registry" return 0 fi - + # Build custom kicbase image with crictl echo "Building custom kicbase image with crictl for ppc64le..." - + # Create build directory if it doesn't exist mkdir -p "${PROJECT_DIR:-.}/scripts/minikube/kicbase" - + # Create Dockerfile for custom kicbase cat > "${PROJECT_DIR:-.}/scripts/minikube/kicbase/Dockerfile" << 'EOF' FROM gcr.io/k8s-minikube/kicbase:v0.0.47 @@ -125,18 +130,18 @@ RUN if [ "$(uname -m)" = "ppc64le" ]; then \ # Verify crictl is available RUN command -v crictl >/dev/null 2>&1 && echo "crictl is available" || echo "crictl not found" EOF - + # Build and push to local registry echo "Building custom kicbase image..." cd "${PROJECT_DIR:-.}/scripts/minikube/kicbase" - + podman build -t localhost:5000/kicbase:v0.0.47 . - + echo "Pushing custom image to local registry..." podman push localhost:5000/kicbase:v0.0.47 --tls-verify=false - + cd - > /dev/null - + echo "✅ Custom kicbase image with crictl ready in local registry" fi return 0 @@ -152,7 +157,7 @@ start_minikube_cluster() { echo "Removing ~/.minikube/machines/minikube directory..." rm -rf ~/.minikube/machines/minikube fi - + # Delete any existing minikube cluster to start fresh echo "Ensuring clean minikube state..." "${PROJECT_DIR:-.}/bin/minikube" delete 2>/dev/null || true @@ -163,6 +168,7 @@ start_minikube_cluster() { if [[ "${ARCH}" == "ppc64le" ]]; then echo "Using custom kicbase image for ppc64le with crictl..." 
start_args+=("--base-image=localhost:5000/kicbase:v0.0.47") + start_args+=("--insecure-registry=localhost:5000") fi # Use default bridge CNI to avoid Docker Hub rate limiting issues From 148779d4c1053b09b69ef39bfe83c7b9d1379dc7 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Thu, 31 Jul 2025 23:43:02 +0200 Subject: [PATCH 030/164] add empty config handling and broken podman making --- .evergreen.yml | 4 ++-- scripts/dev/configure_docker_auth.sh | 5 +++++ scripts/minikube/setup_minikube_host.sh | 7 +++++++ 3 files changed, 14 insertions(+), 2 deletions(-) diff --git a/.evergreen.yml b/.evergreen.yml index fd3dc78b0..2275ab0e4 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -1477,7 +1477,7 @@ buildvariants: display_name: e2e_smoke_ibm_power tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] run_on: - - rhel9-power-large + - rhel9-power-small allowed_requesters: [ "patch", "github_tag" ] # depends_on: # - name: build_test_image @@ -1489,7 +1489,7 @@ buildvariants: display_name: e2e_smoke_ibm_z tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] run_on: - - rhel9-zseries-large + - rhel9-zseries-small allowed_requesters: [ "patch", "github_tag" ] # depends_on: # - name: build_test_image diff --git a/scripts/dev/configure_docker_auth.sh b/scripts/dev/configure_docker_auth.sh index 74e0d9abc..2e90328ed 100755 --- a/scripts/dev/configure_docker_auth.sh +++ b/scripts/dev/configure_docker_auth.sh @@ -91,6 +91,11 @@ detect_container_runtime check_docker_daemon_is_running +# Initialize config file if it doesn't exist +if [[ ! -f "${CONFIG_PATH}" ]]; then + echo '{}' > "${CONFIG_PATH}" +fi + if [[ -f "${CONFIG_PATH}" ]]; then if [[ "${RUNNING_IN_EVG:-"false"}" != "true" ]]; then # Check if login is actually required by making a HEAD request to ECR using existing credentials diff --git a/scripts/minikube/setup_minikube_host.sh b/scripts/minikube/setup_minikube_host.sh index 88afd424f..cf39e8f4d 100755 --- a/scripts/minikube/setup_minikube_host.sh +++ b/scripts/minikube/setup_minikube_host.sh @@ -191,6 +191,13 @@ setup_podman() { # Check if podman is already available if command -v podman &> /dev/null; then echo "✅ Podman already installed" + + # Reset podman if it's in an invalid state + if podman info 2>&1 | grep -q "invalid internal status"; then + echo "Resetting podman due to invalid internal status..." + podman system migrate || true + podman system reset --force || true + fi else echo "Installing podman..." 
sudo dnf install -y podman From 10811d7440721f34ceeb90a231630ec4ea466a12 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Thu, 31 Jul 2025 23:48:57 +0200 Subject: [PATCH 031/164] handle edge cases --- .../{e2e_smoke_ibm => e2e_smoke_ibm_power} | 0 scripts/dev/contexts/e2e_smoke_ibm_z | 18 ++++++++++++++++++ 2 files changed, 18 insertions(+) rename scripts/dev/contexts/{e2e_smoke_ibm => e2e_smoke_ibm_power} (100%) create mode 100644 scripts/dev/contexts/e2e_smoke_ibm_z diff --git a/scripts/dev/contexts/e2e_smoke_ibm b/scripts/dev/contexts/e2e_smoke_ibm_power similarity index 100% rename from scripts/dev/contexts/e2e_smoke_ibm rename to scripts/dev/contexts/e2e_smoke_ibm_power diff --git a/scripts/dev/contexts/e2e_smoke_ibm_z b/scripts/dev/contexts/e2e_smoke_ibm_z new file mode 100644 index 000000000..03384c26c --- /dev/null +++ b/scripts/dev/contexts/e2e_smoke_ibm_z @@ -0,0 +1,18 @@ +#!/usr/bin/env bash + +set -Eeou pipefail + +script_name=$(readlink -f "${BASH_SOURCE[0]}") +script_dir=$(dirname "${script_name}") + +source "${script_dir}/root-context" + +export ops_manager_version="cloud_qa" + +# This is required to be able to rebuild the om image and use that image which has been rebuild +export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev +CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${script_dir}"/../../../.evergreen.yml | awk '{print $3}') +export CUSTOM_OM_VERSION + +export CUSTOM_MDB_VERSION=6.0.5 +export CUSTOM_MDB_PREV_VERSION=5.0.7 From 8d72a1c28742598c20d54f18e7943c083820efec Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Fri, 1 Aug 2025 10:47:16 +0200 Subject: [PATCH 032/164] Build cli --- .../build_multi_cluster_kubeconfig_creator.sh | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/scripts/evergreen/build_multi_cluster_kubeconfig_creator.sh b/scripts/evergreen/build_multi_cluster_kubeconfig_creator.sh index f8bbf1890..63729d024 100755 --- a/scripts/evergreen/build_multi_cluster_kubeconfig_creator.sh +++ b/scripts/evergreen/build_multi_cluster_kubeconfig_creator.sh @@ -16,14 +16,21 @@ echo "Building multi cluster kube config creation tool." 
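# Editor's note (annotation, not part of the original script): the hunk below
# fans one backgrounded `go build` out per GOOS/GOARCH pair and joins them with
# a bare `wait`, which in bash returns 0 even when a background job failed. A
# hedged sketch that would also propagate cross-compile failures (the out_*
# output names are illustrative, not taken from the patch):
#
#   pids=()
#   GOOS=linux GOARCH=s390x CGO_ENABLED=0 go build -o out_s390x main.go & pids+=("$!")
#   GOOS=linux GOARCH=ppc64le CGO_ENABLED=0 go build -o out_ppc64le main.go & pids+=("$!")
#   for pid in "${pids[@]}"; do wait "${pid}" || exit 1; done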
project_dir="$(pwd)" pushd cmd/kubectl-mongodb -GOOS="${OS}" GOARCH="${ARCH}" CGO_ENABLED=0 go build -buildvcs=false -o "${project_dir}/docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator" main.go -GOOS="linux" GOARCH="amd64" CGO_ENABLED=0 go build -buildvcs=false -o "${project_dir}/docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_linux" main.go +GOOS="${OS}" GOARCH="${ARCH}" CGO_ENABLED=0 go build -buildvcs=false -o "${project_dir}/docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator" main.go & + +GOOS="linux" GOARCH="amd64" CGO_ENABLED=0 go build -buildvcs=false -o "${project_dir}/docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_amd64" main.go & +GOOS="linux" GOARCH="s390x" CGO_ENABLED=0 go build -buildvcs=false -o "${project_dir}/docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_s390x" main.go & +GOOS="linux" GOARCH="ppc64le" CGO_ENABLED=0 go build -buildvcs=false -o "${project_dir}/docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_ppc64le" main.go & +GOOS="linux" GOARCH="arm64" CGO_ENABLED=0 go build -buildvcs=false -o "${project_dir}/docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_arm64" main.go & +wait popd chmod +x docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator -# this one is used for the dockerfile to build the test image running on linux, this script might create 2 times -# the same binary, but on the average case it creates one for linux and one for darwin-arm -chmod +x docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_linux +# these are used in the dockerfile +chmod +x docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_amd64 +chmod +x docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_s390x +chmod +x docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_ppc64le +chmod +x docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_arm64 mkdir -p bin || true cp docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator bin/kubectl-mongodb || true From 6b96e7f3b1f22c75d6dbaff78e9cac5395097a39 Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Fri, 1 Aug 2025 10:48:08 +0200 Subject: [PATCH 033/164] E2E dockerfile --- docker/mongodb-kubernetes-tests/Dockerfile | 49 ++++++++++++++++------ 1 file changed, 36 insertions(+), 13 deletions(-) diff --git a/docker/mongodb-kubernetes-tests/Dockerfile b/docker/mongodb-kubernetes-tests/Dockerfile index 424f5ee76..f81b6b1ab 100644 --- a/docker/mongodb-kubernetes-tests/Dockerfile +++ b/docker/mongodb-kubernetes-tests/Dockerfile @@ -8,8 +8,10 @@ # ARG PYTHON_VERSION -FROM --platform=linux/amd64 public.ecr.aws/docker/library/python:${PYTHON_VERSION}-slim as builder +FROM public.ecr.aws/docker/library/python:${PYTHON_VERSION}-slim as builder +# MarkupSafe, cryptography, python-ldap, ruamel.yaml.clib, grpcio, psutil +COPY wheels /wheels RUN apt-get -qq update \ && apt-get -y -qq install \ @@ -17,10 +19,29 @@ RUN apt-get -qq update \ COPY requirements.txt requirements.txt -RUN python3 -m venv /venv && . /venv/bin/activate && python3 -m pip install -r requirements.txt +RUN python3 -m venv /venv && . 
/venv/bin/activate && pip install --upgrade pip && GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=1 pip install --find-links /wheels -r requirements.txt +FROM scratch AS tools_downloader -FROM --platform=linux/amd64 public.ecr.aws/docker/library/python:${PYTHON_VERSION}-slim +ARG mongodb_tools_url="https://fastdl.mongodb.org/tools/db" + +ARG mongodb_tools_version_amd64="mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" +ADD "${mongodb_tools_url}/${mongodb_tools_version_amd64}" /data/amd64/mongodb_tools.tgz + +ARG mongodb_tools_version_arm64="mongodb-database-tools-rhel93-aarch64-100.12.0.tgz" +ADD "${mongodb_tools_url}/${mongodb_tools_version_arm64}" /data/arm64/mongodb_tools.tgz + +ARG mongodb_tools_version_s390x="mongodb-database-tools-rhel9-s390x-100.12.0.tgz" +ADD "${mongodb_tools_url}/${mongodb_tools_version_s390x}" /data/s390x/mongodb_tools.tgz + +ARG mongodb_tools_version_ppc64le="mongodb-database-tools-rhel9-ppc64le-100.12.0.tgz" +ADD "${mongodb_tools_url}/${mongodb_tools_version_ppc64le}" /data/ppc64le/mongodb_tools.tgz + + +FROM public.ecr.aws/docker/library/python:${PYTHON_VERSION}-slim + +ARG TARGETARCH +COPY --from=tools_downloader "/data/${TARGETARCH}/mongodb_tools.tgz" /tmp/mongodb-tools.tgz RUN apt-get -qq update \ && apt-get -y -qq install \ @@ -30,20 +51,22 @@ RUN apt-get -qq update \ git \ openssl -ENV HELM_NAME "helm-v3.17.1-linux-amd64.tar.gz" -# install Helm -RUN curl --fail --retry 3 -L -o "${HELM_NAME}" "https://get.helm.sh/${HELM_NAME}" \ - && tar -xzf "${HELM_NAME}" \ - && rm "${HELM_NAME}" \ - && mv "linux-amd64/helm" "/usr/local/bin/helm" - -ADD https://fastdl.mongodb.org/tools/db/mongodb-database-tools-ubuntu2204-x86_64-100.12.0.tgz /tmp/mongodb-tools.tgz RUN mkdir -p /tmp/mongodb-tools && \ tar xfz /tmp/mongodb-tools.tgz -C /tmp/mongodb-tools && \ cp /tmp/mongodb-tools/*/bin/* /usr/local/bin/ && \ rm -rf /tmp/mongodb-tools /tmp/mongodb-tools.tgz -RUN curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl" \ + +ENV HELM_NAME "helm-v3.17.1-linux-${TARGETARCH}.tar.gz" + +# install Helm +RUN curl --fail --retry 3 -L -o "${HELM_NAME}" "https://get.helm.sh/${HELM_NAME}" \ + && tar -xzf "${HELM_NAME}" \ + && rm "${HELM_NAME}" \ + && mv "linux-${TARGETARCH}/helm" "/usr/local/bin/helm" + +# install kubectl +RUN curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/${TARGETARCH}/kubectl" \ && chmod +x ./kubectl \ && mv ./kubectl /usr/local/bin/kubectl @@ -62,4 +85,4 @@ COPY release.json /release.json # we use the public directory to automatically test resources samples COPY public /mongodb-kubernetes/public -ADD multi-cluster-kube-config-creator_linux /usr/local/bin/multi-cluster-kube-config-creator +ADD "multi-cluster-kube-config-creator_${TARGETARCH}" /usr/local/bin/multi-cluster-kube-config-creator From 213887b2b4187bc29a347d088bfd777f839f78d9 Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Fri, 1 Aug 2025 10:48:31 +0200 Subject: [PATCH 034/164] Operator dockerfile --- docker/mongodb-kubernetes-operator/Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/mongodb-kubernetes-operator/Dockerfile b/docker/mongodb-kubernetes-operator/Dockerfile index 1b2fb371d..8253c5e4f 100644 --- a/docker/mongodb-kubernetes-operator/Dockerfile +++ b/docker/mongodb-kubernetes-operator/Dockerfile @@ -1,6 +1,7 @@ FROM --platform=$BUILDPLATFORM 
public.ecr.aws/docker/library/golang:1.24 AS builder -ADD https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 /usr/local/bin/jq +ARG BUILDARCH +ADD "https://github.com/stedolan/jq/releases/download/jq-1.8.1/jq-linux-${BUILDARCH}" /usr/local/bin/jq RUN chmod +x /usr/local/bin/jq COPY go.sum go.mod /go/src/github.com/mongodb/mongodb-kubernetes/ From fd4367dca36936a06b8ee7fec74b135a3251019d Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Fri, 1 Aug 2025 10:48:57 +0200 Subject: [PATCH 035/164] Prepare script --- scripts/dev/prepare_local_e2e_run.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/scripts/dev/prepare_local_e2e_run.sh b/scripts/dev/prepare_local_e2e_run.sh index 6bac19df1..2139b894b 100755 --- a/scripts/dev/prepare_local_e2e_run.sh +++ b/scripts/dev/prepare_local_e2e_run.sh @@ -60,6 +60,11 @@ prepare_operator_config_map "$(kubectl config current-context)" 2>&1 | prepend " rm -rf docker/mongodb-kubernetes-tests/helm_chart cp -rf helm_chart docker/mongodb-kubernetes-tests/helm_chart +rm -rf docker/mongodb-kubernetes-tests/public +cp -rf public docker/mongodb-kubernetes-tests/public +cp release.json docker/mongodb-kubernetes-tests/release.json +cp requirements.txt docker/mongodb-kubernetes-tests/requirements.txt + # shellcheck disable=SC2154 if [[ "${KUBE_ENVIRONMENT_NAME}" == "multi" ]]; then prepare_multi_cluster_e2e_run 2>&1 | prepend "prepare_multi_cluster_e2e_run" From 49ddbe2fcbd9c8188f6930d0d25d06840e5edb0a Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Fri, 1 Aug 2025 10:49:02 +0200 Subject: [PATCH 036/164] Readmes --- README.md | 1 - docker/mongodb-agent-non-matrix/README.md | 26 +++++++++++++------ docker/mongodb-agent/README.md | 18 ++++++++----- docker/mongodb-kubernetes-database/README.md | 2 +- .../mongodb-kubernetes-init-appdb/README.md | 2 +- .../README.md | 18 ++++++++----- docker/mongodb-kubernetes-operator/README.md | 4 +-- docker/mongodb-kubernetes-tests/README.md | 12 +++++++++ 8 files changed, 57 insertions(+), 26 deletions(-) diff --git a/README.md b/README.md index e230cf351..1f5a59a10 100644 --- a/README.md +++ b/README.md @@ -57,4 +57,3 @@ Migration from [MongoDB Community Operator](https://github.com/mongodb/mongodb-k See our detailed migration guides: - [Migrating from MongoDB Community Operator](docs/migration/community-operator-migration.md) - [Migrating from MongoDB Enterprise Kubernetes Operator](https://www.mongodb.com/docs/kubernetes/current/tutorial/migrate-to-mck/) - diff --git a/docker/mongodb-agent-non-matrix/README.md b/docker/mongodb-agent-non-matrix/README.md index b51bef808..c50d889c4 100644 --- a/docker/mongodb-agent-non-matrix/README.md +++ b/docker/mongodb-agent-non-matrix/README.md @@ -6,12 +6,22 @@ TODO: What to do with label quay.expires-after=48h? ```bash AGENT_VERSION="108.0.7.8810-1" TOOLS_VERSION="100.12.0" -AGENT_DISTRO="rhel9_x86_64" -TOOLS_DISTRO="rhel93-x86_64" -docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . 
-f docker/mongodb-agent/Dockerfile -t "mongodb-agent:${AGENT_VERSION}" \ - --build-arg version="${VERSION}" \ - --build-arg agent_version="${AGENT_VERSION}" \ - --build-arg tools_version="${TOOLS_VERSION}" \ - --build-arg agent_distro="${AGENT_DISTRO}" \ - --build-arg tools_distro="${TOOLS_DISTRO}" +MONGODB_TOOLS_URL="https://downloads.mongodb.org/tools/db" +MONGODB_AGENT_URL="https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" +BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/" + +docker buildx build --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . -f docker/mongodb-agent-non-matrix/Dockerfile -t "${BASE_REPO_URL}mongodb-agent:${AGENT_VERSION}" \ + --build-arg version="${AGENT_VERSION}" \ + --build-arg mongodb_tools_url="${MONGODB_TOOLS_URL}" \ + --build-arg mongodb_agent_url="${MONGODB_AGENT_URL}" \ + --build-arg mongodb_agent_version_s390x="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel7_s390x.tar.gz" \ + --build-arg mongodb_agent_version_ppc64le="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel8_ppc64le.tar.gz" \ + --build-arg mongodb_agent_version_amd64="mongodb-mms-automation-agent-${AGENT_VERSION}.linux_x86_64.tar.gz" \ + --build-arg mongodb_agent_version_arm64="mongodb-mms-automation-agent-${AGENT_VERSION}.amzn2_aarch64.tar.gz" \ + --build-arg mongodb_tools_version_arm64="mongodb-database-tools-rhel93-aarch64-${TOOLS_VERSION}.tgz" \ + --build-arg mongodb_tools_version_amd64="mongodb-database-tools-rhel93-x86_64-${TOOLS_VERSION}.tgz" \ + --build-arg mongodb_tools_version_s390x="mongodb-database-tools-rhel9-s390x-${TOOLS_VERSION}.tgz" \ + --build-arg mongodb_tools_version_ppc64le="mongodb-database-tools-rhel9-ppc64le-${TOOLS_VERSION}.tgz" + +docker push "${BASE_REPO_URL}mongodb-agent:${AGENT_VERSION}" ``` diff --git a/docker/mongodb-agent/README.md b/docker/mongodb-agent/README.md index 7cbb7d5ae..91fc2ca1f 100644 --- a/docker/mongodb-agent/README.md +++ b/docker/mongodb-agent/README.md @@ -8,11 +8,15 @@ binaries from there. Then we continue with the other steps to fully build the im For building the MongoDB Agent image locally use the example command: ```bash +VERSION="evergreen" AGENT_VERSION="108.0.7.8810-1" -INIT_DATABASE_IMAGE="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/mongodb-kubernetes-init-database:evergreen" +TOOLS_VERSION="100.12.0" MONGODB_TOOLS_URL="https://downloads.mongodb.org/tools/db" MONGODB_AGENT_URL="https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" -docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . -f docker/mongodb-agent/Dockerfile -t "mongodb-agent:${VERSION}" \ +BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/" +INIT_DATABASE_IMAGE="${BASE_REPO_URL}mongodb-kubernetes-init-database:${VERSION}" + +docker buildx build --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . 
-f docker/mongodb-agent/Dockerfile -t "${BASE_REPO_URL}mongodb-agent:${AGENT_VERSION}_${VERSION}" \ --build-arg version="${VERSION}" \ --build-arg init_database_image="${INIT_DATABASE_IMAGE}" \ --build-arg mongodb_tools_url="${MONGODB_TOOLS_URL}" \ @@ -21,9 +25,11 @@ docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,l --build-arg mongodb_agent_version_ppc64le="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel8_ppc64le.tar.gz" \ --build-arg mongodb_agent_version_amd64="mongodb-mms-automation-agent-${AGENT_VERSION}.linux_x86_64.tar.gz" \ --build-arg mongodb_agent_version_arm64="mongodb-mms-automation-agent-${AGENT_VERSION}.amzn2_aarch64.tar.gz" \ - --build-arg mongodb_tools_version_arm64="mongodb-database-tools-rhel93-aarch64-100.12.0.tgz" \ - --build-arg mongodb_tools_version_amd64="mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" \ - --build-arg mongodb_tools_version_s390x="mongodb-database-tools-rhel9-s390x-100.12.0.tgz" \ - --build-arg mongodb_tools_version_ppc64le="mongodb-database-tools-rhel9-ppc64le-100.12.0.tgz" + --build-arg mongodb_tools_version_arm64="mongodb-database-tools-rhel93-aarch64-${TOOLS_VERSION}.tgz" \ + --build-arg mongodb_tools_version_amd64="mongodb-database-tools-rhel93-x86_64-${TOOLS_VERSION}.tgz" \ + --build-arg mongodb_tools_version_s390x="mongodb-database-tools-rhel9-s390x-${TOOLS_VERSION}.tgz" \ + --build-arg mongodb_tools_version_ppc64le="mongodb-database-tools-rhel9-ppc64le-${TOOLS_VERSION}.tgz" + +docker push "${BASE_REPO_URL}mongodb-agent:${AGENT_VERSION}_${VERSION}" ``` diff --git a/docker/mongodb-kubernetes-database/README.md b/docker/mongodb-kubernetes-database/README.md index ae3e3fde4..7dd70a6fa 100644 --- a/docker/mongodb-kubernetes-database/README.md +++ b/docker/mongodb-kubernetes-database/README.md @@ -40,7 +40,7 @@ For building the MongoDB Database image locally use the example command: ```bash VERSION="1.3.0" -BASE_REPO_URL="" +BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/" docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . -f docker/mongodb-kubernetes-database/Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes-database:${VERSION}" \ --build-arg VERSION="${VERSION}" diff --git a/docker/mongodb-kubernetes-init-appdb/README.md b/docker/mongodb-kubernetes-init-appdb/README.md index 72e31c3dd..f3d51eb1c 100644 --- a/docker/mongodb-kubernetes-init-appdb/README.md +++ b/docker/mongodb-kubernetes-init-appdb/README.md @@ -5,7 +5,7 @@ For building the MongoDB Init AppDB image locally use the example command: ```bash VERSION="1.3.0" MONGODB_TOOLS_URL="https://downloads.mongodb.org/tools/db" -BASE_REPO_URL="" +BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/" docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . 
-f docker/mongodb-kubernetes-init-appdb/Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes-init-appdb:${VERSION}" \
     --build-arg version="${VERSION}" \
     --build-arg mongodb_tools_url="${MONGODB_TOOLS_URL_UBI}" \
diff --git a/docker/mongodb-kubernetes-init-database/README.md b/docker/mongodb-kubernetes-init-database/README.md
index 8b0b16787..04571f284 100644
--- a/docker/mongodb-kubernetes-init-database/README.md
+++ b/docker/mongodb-kubernetes-init-database/README.md
@@ -3,16 +3,20 @@
 For building the MongoDB Init AppDB image locally use the example command:

 ```bash
-VERSION="1.3.0"
+VERSION="evergreen"
+TOOLS_VERSION="100.12.0"
 MONGODB_TOOLS_URL_UBI="https://downloads.mongodb.org/tools/db"
-BASE_REPO_URL=""
-docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . -f docker/mongodb-kubernetes-init-database/Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes-init-database:${VERSION}" \
+BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/"
+docker buildx build --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . -f docker/mongodb-kubernetes-init-database/Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes-init-database:${VERSION}" \
     --build-arg version="${VERSION}" \
     --build-arg mongodb_tools_url="${MONGODB_TOOLS_URL_UBI}" \
-    --build-arg mongodb_tools_version_arm64="mongodb-database-tools-rhel93-aarch64-100.12.0.tgz" \
-    --build-arg mongodb_tools_version_amd64="mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" \
-    --build-arg mongodb_tools_version_s390x="mongodb-database-tools-rhel9-s390x-100.12.0.tgz" \
-    --build-arg mongodb_tools_version_ppc64le="mongodb-database-tools-rhel9-ppc64le-100.12.0.tgz"
+    --build-arg mongodb_tools_version_arm64="mongodb-database-tools-rhel93-aarch64-${TOOLS_VERSION}.tgz" \
+    --build-arg mongodb_tools_version_amd64="mongodb-database-tools-rhel93-x86_64-${TOOLS_VERSION}.tgz" \
+    --build-arg mongodb_tools_version_s390x="mongodb-database-tools-rhel9-s390x-${TOOLS_VERSION}.tgz" \
+    --build-arg mongodb_tools_version_ppc64le="mongodb-database-tools-rhel9-ppc64le-${TOOLS_VERSION}.tgz"

 docker push "${BASE_REPO_URL}mongodb-kubernetes-init-database:${VERSION}"
 ```
+
+First build without cache: 2:20.28 total.
+Second build without cache: 2:31.74 total.
diff --git a/docker/mongodb-kubernetes-operator/README.md b/docker/mongodb-kubernetes-operator/README.md
index adb532345..546ed893c 100644
--- a/docker/mongodb-kubernetes-operator/README.md
+++ b/docker/mongodb-kubernetes-operator/README.md
@@ -13,10 +13,10 @@ CGO_ENABLED=0 GOOS=linux GOFLAGS="-mod=vendor" go build -i -o mongodb-kubernetes
 For building the MongoDB Init Ops Manager image locally use the example command:

 ```bash
-VERSION="1.3.0"
+VERSION="evergreen"
 LOG_AUTOMATION_CONFIG_DIFF="false"
 USE_RACE="false"
-BASE_REPO_URL=""
+BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/"
 docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . -f docker/mongodb-kubernetes-operator/Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes:${VERSION}" \
     --build-arg version="${VERSION}" \
     --build-arg log_automation_config_diff="${LOG_AUTOMATION_CONFIG_DIFF}" \
diff --git a/docker/mongodb-kubernetes-tests/README.md b/docker/mongodb-kubernetes-tests/README.md
index dec9ac764..e09b77a0e 100644
--- a/docker/mongodb-kubernetes-tests/README.md
+++ b/docker/mongodb-kubernetes-tests/README.md
@@ -106,6 +106,18 @@
 indicate which test classes need to be run.
But for now they help us to call a particular E2E task we are interested in.

+## Building test image
+
+```bash
+make prepare-local-e2e
+cd docker/mongodb-kubernetes-tests
+BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/"
+docker buildx build --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . -f Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes-tests:evergreen" \
+  --build-arg PYTHON_VERSION="3.13"
+
+docker push "${BASE_REPO_URL}mongodb-kubernetes-tests:evergreen"
+```
+
 # Writing New Tests #

 ### Create a new Python test file ###

From 449152fed571c867e5726c380a55926a41afe4cf Mon Sep 17 00:00:00 2001
From: Lucian Tosa
Date: Fri, 1 Aug 2025 16:04:27 +0200
Subject: [PATCH 037/164] Fix e2e dockerfile

---
 docker/mongodb-kubernetes-tests/Dockerfile | 7 ++-----
 1 file changed, 2 insertions(+), 5 deletions(-)

diff --git a/docker/mongodb-kubernetes-tests/Dockerfile b/docker/mongodb-kubernetes-tests/Dockerfile
index f81b6b1ab..5347a8f19 100644
--- a/docker/mongodb-kubernetes-tests/Dockerfile
+++ b/docker/mongodb-kubernetes-tests/Dockerfile
@@ -10,16 +10,13 @@ ARG PYTHON_VERSION

 FROM public.ecr.aws/docker/library/python:${PYTHON_VERSION}-slim as builder

-# MarkupSafe, cryptography, python-ldap, ruamel.yaml.clib, grpcio, psutil
-COPY wheels /wheels
-
 RUN apt-get -qq update \
     && apt-get -y -qq install \
-    curl libldap2-dev libsasl2-dev build-essential git
+    curl libldap2-dev libsasl2-dev build-essential git libssl-dev pkg-config

 COPY requirements.txt requirements.txt

-RUN python3 -m venv /venv && . /venv/bin/activate && pip install --upgrade pip && GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=1 pip install --find-links /wheels -r requirements.txt
+RUN python3 -m venv /venv && . /venv/bin/activate && pip install --upgrade pip && GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=1 pip install -r requirements.txt

 FROM scratch AS tools_downloader

From d72017af3825f5f99393a87e9d40b49037a53aff Mon Sep 17 00:00:00 2001
From: Nam Nguyen
Date: Fri, 1 Aug 2025 17:00:36 +0200
Subject: [PATCH 038/164] use python wheel local build, unify teardown for minikube, use run for cloudqa

---
 .evergreen-functions.yml                             |  11 +-
 .evergreen.yml                                       |  12 +-
 scripts/dev/configure_docker_auth.sh                 |  74 +++++---
 scripts/dev/contexts/e2e_smoke_ibm_power             |   1 +
 scripts/dev/contexts/e2e_smoke_ibm_z                 |   1 +
 scripts/dev/recreate_python_venv.sh                  |  90 ++++------
 scripts/evergreen/setup_kubernetes_environment.sh    |   2 +
 scripts/evergreen/setup_minikube_host.sh             |   4 +-
 scripts/evergreen/teardown_kubernetes_environment.sh |   8 +-
 scripts/minikube/setup_minikube_host.sh              | 161 +++++-------
 10 files changed, 159 insertions(+), 205 deletions(-)

diff --git a/.evergreen-functions.yml b/.evergreen-functions.yml
index 71837ab97..c67634152 100644
--- a/.evergreen-functions.yml
+++ b/.evergreen-functions.yml
@@ -236,15 +236,6 @@ functions:
           - ${workdir}/bin
         binary: scripts/dev/setup_evg_host.sh

-  setup_ibm_host: &setup_ibm_host
-    command: subprocess.exec
-    type: setup
-    params:
-      working_dir: src/github.com/mongodb/mongodb-kubernetes
-      add_to_path:
-        - ${workdir}/bin
-      binary: scripts/minikube/setup_minikube_host.sh
-
   lint_repo:
     - command: subprocess.exec
       type: setup
@@ -349,7 +340,7 @@ functions:
         working_dir: src/github.com/mongodb/mongodb-kubernetes
         script: |
           source .generated/context.export.env
-          scripts/evergreen/e2e/setup_cloud_qa.py create
+          scripts/evergreen/run_python.sh scripts/evergreen/e2e/setup_cloud_qa.py create
       # The additional switch is needed, since we now have created the needed OM exports.
- *switch_context diff --git a/.evergreen.yml b/.evergreen.yml index 2275ab0e4..136212703 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -140,7 +140,7 @@ variables: - &teardown_group teardown_group: - - func: prune_docker_resources + - func: teardown_kubernetes_environment - func: run_retry_script - &base_om7_dependency @@ -1197,13 +1197,7 @@ task_groups: - name: e2e_smoke_ibm_task_group max_hosts: -1 <<: *setup_group_ibm - setup_task_can_fail_task: true - setup_task: - - func: cleanup_exec_environment - teardown_task_can_fail_task: true - teardown_task: - - func: upload_e2e_logs - - func: teardown_kubernetes_environment + <<: *setup_and_teardown_task_cloudqa tasks: - e2e_replica_set <<: *teardown_group @@ -1477,7 +1471,7 @@ buildvariants: display_name: e2e_smoke_ibm_power tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] run_on: - - rhel9-power-small + - rhel9-power-large allowed_requesters: [ "patch", "github_tag" ] # depends_on: # - name: build_test_image diff --git a/scripts/dev/configure_docker_auth.sh b/scripts/dev/configure_docker_auth.sh index 2e90328ed..4edf68817 100755 --- a/scripts/dev/configure_docker_auth.sh +++ b/scripts/dev/configure_docker_auth.sh @@ -10,11 +10,12 @@ source scripts/funcs/kubernetes # Detect available container runtime detect_container_runtime() { - if command -v podman &> /dev/null && podman info &> /dev/null; then + if command -v podman &> /dev/null && (podman info &> /dev/null || sudo podman info &> /dev/null); then CONTAINER_RUNTIME="podman" - CONFIG_PATH="${HOME}/.config/containers/auth.json" - mkdir -p "$(dirname "${CONFIG_PATH}")" - echo "Using Podman for container authentication" + # Use root's auth.json since minikube uses sudo podman + CONFIG_PATH="/root/.config/containers/auth.json" + sudo mkdir -p "$(dirname "${CONFIG_PATH}")" + echo "Using Podman for container authentication (sudo mode)" return 0 elif command -v docker &> /dev/null; then CONTAINER_RUNTIME="docker" @@ -60,14 +61,23 @@ check_docker_daemon_is_running() { remove_element() { config_option="${1}" tmpfile=$(mktemp) - + # Initialize config file if it doesn't exist if [[ ! -f "${CONFIG_PATH}" ]]; then - echo '{}' > "${CONFIG_PATH}" + if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then + echo '{}' | sudo tee "${CONFIG_PATH}" > /dev/null + else + echo '{}' > "${CONFIG_PATH}" + fi + fi + + if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then + sudo "${PROJECT_DIR:-.}/bin/jq" 'del(.'"${config_option}"')' "${CONFIG_PATH}" >"${tmpfile}" + sudo cp "${tmpfile}" "${CONFIG_PATH}" + else + "${PROJECT_DIR:-.}/bin/jq" 'del(.'"${config_option}"')' "${CONFIG_PATH}" >"${tmpfile}" + cp "${tmpfile}" "${CONFIG_PATH}" fi - - jq 'del(.'"${config_option}"')' "${CONFIG_PATH}" >"${tmpfile}" - cp "${tmpfile}" "${CONFIG_PATH}" rm "${tmpfile}" } @@ -75,9 +85,9 @@ remove_element() { container_login() { local username="$1" local registry="$2" - + if [[ "${CONTAINER_RUNTIME}" == "podman" ]]; then - podman login --username "${username}" --password-stdin "${registry}" + sudo podman login --username "${username}" --password-stdin "${registry}" else docker login --username "${username}" --password-stdin "${registry}" fi @@ -93,14 +103,22 @@ check_docker_daemon_is_running # Initialize config file if it doesn't exist if [[ ! 
-f "${CONFIG_PATH}" ]]; then - echo '{}' > "${CONFIG_PATH}" + if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then + echo '{}' | sudo tee "${CONFIG_PATH}" > /dev/null + else + echo '{}' > "${CONFIG_PATH}" + fi fi if [[ -f "${CONFIG_PATH}" ]]; then if [[ "${RUNNING_IN_EVG:-"false"}" != "true" ]]; then # Check if login is actually required by making a HEAD request to ECR using existing credentials echo "Checking if container registry credentials are valid..." - ecr_auth=$(jq -r '.auths."268558157000.dkr.ecr.us-east-1.amazonaws.com".auth // empty' "${CONFIG_PATH}") + if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then + ecr_auth=$(sudo "${PROJECT_DIR:-.}/bin/jq" -r '.auths."268558157000.dkr.ecr.us-east-1.amazonaws.com".auth // empty' "${CONFIG_PATH}") + else + ecr_auth=$("${PROJECT_DIR:-.}/bin/jq" -r '.auths."268558157000.dkr.ecr.us-east-1.amazonaws.com".auth // empty' "${CONFIG_PATH}") + fi if [[ -n "${ecr_auth}" ]]; then http_status=$(curl --head -s -o /dev/null -w "%{http_code}" --max-time 3 "https://268558157000.dkr.ecr.us-east-1.amazonaws.com/v2/dev/mongodb-kubernetes/manifests/latest" \ @@ -120,11 +138,20 @@ if [[ -f "${CONFIG_PATH}" ]]; then # There could be some leftovers on Evergreen (Docker-specific, skip for Podman) if [[ "${CONTAINER_RUNTIME}" == "docker" ]]; then - if grep -q "credsStore" "${CONFIG_PATH}"; then - remove_element "credsStore" - fi - if grep -q "credHelpers" "${CONFIG_PATH}"; then - remove_element "credHelpers" + if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then + if sudo grep -q "credsStore" "${CONFIG_PATH}"; then + remove_element "credsStore" + fi + if sudo grep -q "credHelpers" "${CONFIG_PATH}"; then + remove_element "credHelpers" + fi + else + if grep -q "credsStore" "${CONFIG_PATH}"; then + remove_element "credsStore" + fi + if grep -q "credHelpers" "${CONFIG_PATH}"; then + remove_element "credHelpers" + fi fi fi fi @@ -137,7 +164,7 @@ aws ecr get-login-password --region "us-east-1" | container_login "AWS" "2685581 # by default docker tries to store credentials in an external storage (e.g. 
OS keychain) - not in the config.json # We need to store it as base64 string in config.json instead so we need to remove the "credsStore" element # This is Docker-specific behavior, Podman stores credentials directly in auth.json -if [[ "${CONTAINER_RUNTIME}" == "docker" ]] && grep -q "credsStore" "${CONFIG_PATH}"; then +if [[ "${CONTAINER_RUNTIME}" == "docker" ]] && (([[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]] && sudo grep -q "credsStore" "${CONFIG_PATH}") || ([[ "${CONFIG_PATH}" != "/root/.config/containers/auth.json" ]] && grep -q "credsStore" "${CONFIG_PATH}")); then remove_element "credsStore" # login again to store the credentials into the config.json @@ -152,8 +179,13 @@ if [[ -n "${COMMUNITY_PRIVATE_PREVIEW_PULLSECRET_DOCKERCONFIGJSON:-}" ]]; then quay_io_auth_file=$(mktemp) config_tmp=$(mktemp) echo "${COMMUNITY_PRIVATE_PREVIEW_PULLSECRET_DOCKERCONFIGJSON}" | base64 -d > "${quay_io_auth_file}" - jq -s '.[0] * .[1]' "${quay_io_auth_file}" "${CONFIG_PATH}" > "${config_tmp}" - mv "${config_tmp}" "${CONFIG_PATH}" + if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then + sudo jq -s '.[0] * .[1]' "${quay_io_auth_file}" "${CONFIG_PATH}" > "${config_tmp}" + sudo mv "${config_tmp}" "${CONFIG_PATH}" + else + jq -s '.[0] * .[1]' "${quay_io_auth_file}" "${CONFIG_PATH}" > "${config_tmp}" + mv "${config_tmp}" "${CONFIG_PATH}" + fi rm "${quay_io_auth_file}" fi diff --git a/scripts/dev/contexts/e2e_smoke_ibm_power b/scripts/dev/contexts/e2e_smoke_ibm_power index 03384c26c..4ba998050 100644 --- a/scripts/dev/contexts/e2e_smoke_ibm_power +++ b/scripts/dev/contexts/e2e_smoke_ibm_power @@ -16,3 +16,4 @@ export CUSTOM_OM_VERSION export CUSTOM_MDB_VERSION=6.0.5 export CUSTOM_MDB_PREV_VERSION=5.0.7 +export KUBE_ENVIRONMENT_NAME=minikube diff --git a/scripts/dev/contexts/e2e_smoke_ibm_z b/scripts/dev/contexts/e2e_smoke_ibm_z index 03384c26c..4ba998050 100644 --- a/scripts/dev/contexts/e2e_smoke_ibm_z +++ b/scripts/dev/contexts/e2e_smoke_ibm_z @@ -16,3 +16,4 @@ export CUSTOM_OM_VERSION export CUSTOM_MDB_VERSION=6.0.5 export CUSTOM_MDB_PREV_VERSION=5.0.7 +export KUBE_ENVIRONMENT_NAME=minikube diff --git a/scripts/dev/recreate_python_venv.sh b/scripts/dev/recreate_python_venv.sh index a5388e40f..bd8b411db 100755 --- a/scripts/dev/recreate_python_venv.sh +++ b/scripts/dev/recreate_python_venv.sh @@ -4,76 +4,54 @@ set -Eeou pipefail -# Parse command line arguments -INSTALL_REQUIREMENTS=true +ensure_required_python() { + local required_version="${PYTHON_VERSION:-3.10}" + local major_minor + major_minor=$(echo "${required_version}" | grep -oE '^[0-9]+\.[0-9]+') -while [[ $# -gt 0 ]]; do - case $1 in - --skip-requirements) - INSTALL_REQUIREMENTS=false - shift - ;; - -h|--help) - echo "Usage: $0 [--skip-requirements]" - echo " --skip-requirements Skip installing requirements.txt" - echo " -h, --help Show this help message" - exit 0 - ;; - *) - echo "Unknown option: $1" - echo "Use -h or --help for usage information" - exit 1 - ;; - esac -done + echo "Checking for Python ${required_version} (${major_minor}.x)..." >&2 -source scripts/dev/set_env_context.sh - -# Ensure Python 3.10 is available, install if needed -ensure_python310() { - echo "Checking current Python version..." 
>&2 - - # Check if current python is 3.10 + # Check if current python matches required version if command -v python3 &> /dev/null; then local version - if version=$(python3 --version 2>&1) && [[ "${version}" == *"Python 3.10"* ]]; then - echo "Found Python 3.10: ${version}" >&2 + if version=$(python3 --version 2>&1) && [[ "${version}" == *"Python ${major_minor}"* ]]; then + echo "Found Python ${major_minor}: ${version}" >&2 echo "python3" return 0 else echo "Current python3 version: ${version}" >&2 fi fi - - # Try to install Python 3.10 using pyenv if available + + # Try to install required Python version using pyenv if available if command -v pyenv &> /dev/null; then - echo "Python 3.10 not found. Attempting to install via pyenv..." >&2 - - # Check if any 3.10 version is already installed - if pyenv versions --bare | grep -q "^3\.10\."; then + echo "Python ${major_minor} not found. Attempting to install via pyenv..." >&2 + + # Check if any version in the required series is already installed + if pyenv versions --bare | grep -q "^${major_minor}\."; then local installed_version - installed_version=$(pyenv versions --bare | grep "^3\.10\." | head -1) - echo "Found existing pyenv Python 3.10: ${installed_version}" >&2 + installed_version=$(pyenv versions --bare | grep "^${major_minor}\." | head -1) + echo "Found existing pyenv Python ${major_minor}: ${installed_version}" >&2 pyenv global "${installed_version}" echo "python3" return 0 fi - - # Install latest Python 3.10 - local latest_310 - latest_310=$(pyenv install --list | grep -E "^[[:space:]]*3\.10\.[0-9]+$" | tail -1 | xargs) - if [[ -n "${latest_310}" ]]; then - echo "Installing Python ${latest_310} via pyenv..." >&2 - if pyenv install "${latest_310}"; then - pyenv global "${latest_310}" + + # Install latest version in the required series + local latest_version + latest_version=$(pyenv install --list | grep -E "^[[:space:]]*${major_minor}\.[0-9]+$" | tail -1 | xargs) + if [[ -n "${latest_version}" ]]; then + echo "Installing Python ${latest_version} via pyenv..." >&2 + if pyenv install "${latest_version}"; then + pyenv global "${latest_version}" echo "python3" return 0 fi fi fi - - echo "Error: No suitable Python 3.10 installation found and unable to install via pyenv." >&2 - echo "Please ensure Python 3.10 is installed or pyenv is available." >&2 + + echo "Error: No suitable Python ${major_minor} installation found and unable to install via pyenv." >&2 + echo "Please ensure Python ${major_minor} is installed or pyenv is available." >&2 return 1 } @@ -83,22 +61,16 @@ if [[ -d "${PROJECT_DIR}"/venv ]]; then rm -rf "venv" fi -# Ensure Python 3.10 is available -python_bin=$(ensure_python310) +# Ensure required Python version is available +python_bin=$(ensure_required_python) echo "Using python from the following path: ${python_bin}" "${python_bin}" -m venv venv source venv/bin/activate pip install --upgrade pip - -if [[ "${INSTALL_REQUIREMENTS}" == "true" ]]; then - echo "Installing requirements.txt..." - pip install -r requirements.txt -else - echo "Skipping requirements.txt installation (--skip-requirements flag used)" -fi - +echo "Installing requirements.txt..." +pip install -r requirements.txt echo "Python venv was recreated successfully." 
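# Editor's note (annotation, not part of the original script): a hedged sanity
# check that could run at this point to confirm the venv interpreter matches the
# requested series; PYTHON_VERSION and the 3.10 default come from
# ensure_required_python() above:
#   actual=$(python -c 'import sys; print("%d.%d" % sys.version_info[:2])')
#   expected=$(echo "${PYTHON_VERSION:-3.10}" | grep -oE '^[0-9]+\.[0-9]+')
#   [[ "${actual}" == "${expected}" ]] || echo "Warning: venv python ${actual} != requested ${expected}"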
echo "Current python path: $(which python)" python --version diff --git a/scripts/evergreen/setup_kubernetes_environment.sh b/scripts/evergreen/setup_kubernetes_environment.sh index 707231c9f..6edaad50d 100755 --- a/scripts/evergreen/setup_kubernetes_environment.sh +++ b/scripts/evergreen/setup_kubernetes_environment.sh @@ -30,6 +30,8 @@ elif [ "${KUBE_ENVIRONMENT_NAME}" = "kind" ] || [ "${KUBE_ENVIRONMENT_NAME}" = " scripts/dev/recreate_kind_cluster.sh "kind" elif [[ "${KUBE_ENVIRONMENT_NAME}" = "multi" && "${CLUSTER_TYPE}" == "kind" ]]; then scripts/dev/recreate_kind_clusters.sh +elif [[ "${KUBE_ENVIRONMENT_NAME}" = "minikube" ]]; then + echo "Nothing to do for minikube" else echo "KUBE_ENVIRONMENT_NAME not recognized" echo "value is <<${KUBE_ENVIRONMENT_NAME}>>. If empty it means it was not set" diff --git a/scripts/evergreen/setup_minikube_host.sh b/scripts/evergreen/setup_minikube_host.sh index 9d4884530..f4f5d643b 100755 --- a/scripts/evergreen/setup_minikube_host.sh +++ b/scripts/evergreen/setup_minikube_host.sh @@ -38,7 +38,9 @@ run_setup_step() { } # Setup Python environment (needed for AWS CLI pip installation) -run_setup_step "Python Virtual Environment" "scripts/dev/recreate_python_venv.sh" "--skip-requirements" + +export GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=1 +run_setup_step "Python Virtual Environment" "scripts/dev/recreate_python_venv.sh" run_setup_step "AWS CLI Setup" "scripts/evergreen/setup_aws.sh" diff --git a/scripts/evergreen/teardown_kubernetes_environment.sh b/scripts/evergreen/teardown_kubernetes_environment.sh index e5e2bd869..ec0c59966 100755 --- a/scripts/evergreen/teardown_kubernetes_environment.sh +++ b/scripts/evergreen/teardown_kubernetes_environment.sh @@ -1,10 +1,16 @@ #!/usr/bin/env bash -set -Eeou pipefail +set -Eeoux pipefail source scripts/dev/set_env_context.sh if [ "${KUBE_ENVIRONMENT_NAME}" = "kind" ]; then + docker system prune -a -f echo "Deleting Kind cluster" kind delete clusters --all fi + +if [ "${KUBE_ENVIRONMENT_NAME}" = "minikube" ]; then + echo "Deleting minikube cluster" + "${PROJECT_DIR:-.}/bin/minikube" delete +fi diff --git a/scripts/minikube/setup_minikube_host.sh b/scripts/minikube/setup_minikube_host.sh index cf39e8f4d..16f1c1c31 100755 --- a/scripts/minikube/setup_minikube_host.sh +++ b/scripts/minikube/setup_minikube_host.sh @@ -30,17 +30,7 @@ EOF # retrieve arch variable off the shell command line ARCH=${1-"$(uname -m)"} -# Validate architecture -case "${ARCH}" in - s390x|ppc64le|x86_64|aarch64) - echo "Setting up minikube host for architecture: ${ARCH}" - ;; - *) - echo "ERROR: Unsupported architecture: ${ARCH}" - echo "Supported architectures: s390x, ppc64le, x86_64, aarch64" - exit 1 - ;; -esac +echo "Setting up minikube host for architecture: ${ARCH}" download_minikube() { echo "Downloading minikube for ${ARCH}..." @@ -52,17 +42,23 @@ setup_local_registry_and_custom_image() { if [[ "${ARCH}" == "ppc64le" ]]; then echo ">>> Setting up local registry and custom kicbase image for ppc64le..." - # Check if local registry is running - if ! podman ps --filter "name=registry" --format "{{.Names}}" | grep -q "^registry$"; then + # Check if local registry is running (with fallback for namespace issues) + registry_running=false + if curl -s http://localhost:5000/v2/_catalog >/dev/null 2>&1; then + echo "Registry detected via HTTP check (podman ps failed)" + registry_running=true + fi + + if ! $registry_running; then echo "Starting local container registry on port 5000..." 
- podman run -d -p 5000:5000 --name registry --restart=always docker.io/library/registry:2 || { - echo "Registry might already exist, trying to start it..." - podman start registry || { - echo "Removing existing registry and creating new one..." - podman rm -f registry 2>/dev/null || true - podman run -d -p 5000:5000 --name registry --restart=always docker.io/library/registry:2 - } - } + + # Clean up any existing registry first + sudo podman rm -f registry 2>/dev/null || true + + if ! sudo podman run -d -p 5000:5000 --name registry --restart=always docker.io/library/registry:2; then + echo "❌ Failed to start local registry - trying alternative approach" + exit 1 + fi # Wait for registry to be ready echo "Waiting for registry to be ready..." @@ -76,73 +72,63 @@ setup_local_registry_and_custom_image() { echo "✅ Local registry already running" fi - # Configure system-wide podman to trust local registry (with backup) - echo "Configuring system registries.conf to trust local registry..." - - # Backup existing registries.conf if it exists - if [[ -f /etc/containers/registries.conf ]]; then - echo "Backing up existing registries.conf..." - sudo cp /etc/containers/registries.conf /etc/containers/registries.conf.minikube-backup - fi - - # Create a clean registries.conf that includes insecure local registry - sudo tee /etc/containers/registries.conf << 'EOF' -unqualified-search-registries = ["registry.access.redhat.com", "registry.redhat.io", "docker.io"] + # Configure podman to trust local registry (both user and root level for minikube) + echo "Configuring registries.conf to trust local registry..." + # User-level config + mkdir -p ~/.config/containers + cat > ~/.config/containers/registries.conf << 'EOF' [[registry]] location = "localhost:5000" insecure = true +EOF -short-name-mode = "permissive" + # Root-level config (since minikube uses sudo podman) + sudo mkdir -p /root/.config/containers + sudo tee /root/.config/containers/registries.conf << 'EOF' >/dev/null +[[registry]] +location = "localhost:5000" +insecure = true EOF - # Check if custom image already exists in local registry + echo "✅ Registry configuration created for both user and root" + custom_image_tag="localhost:5000/kicbase:v0.0.47" + + # Determine image tag + custom_image_tag="localhost:5000/kicbase:v0.0.47" if curl -s http://localhost:5000/v2/kicbase/tags/list | grep -q "v0.0.47"; then - echo "✅ Custom kicbase image already exists in local registry" + echo "Custom kicbase image already exists in local registry" return 0 fi # Build custom kicbase image with crictl echo "Building custom kicbase image with crictl for ppc64le..." - # Create build directory if it doesn't exist + # Build custom kicbase image mkdir -p "${PROJECT_DIR:-.}/scripts/minikube/kicbase" - - # Create Dockerfile for custom kicbase cat > "${PROJECT_DIR:-.}/scripts/minikube/kicbase/Dockerfile" << 'EOF' FROM gcr.io/k8s-minikube/kicbase:v0.0.47 - -# Install crictl for ppc64le if needed RUN if [ "$(uname -m)" = "ppc64le" ]; then \ - echo "Installing crictl for ppc64le architecture..." 
&& \ CRICTL_VERSION="v1.28.0" && \ curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-ppc64le.tar.gz" \ -o /tmp/crictl.tar.gz && \ tar -C /usr/bin -xzf /tmp/crictl.tar.gz && \ chmod +x /usr/bin/crictl && \ - rm /tmp/crictl.tar.gz && \ - echo "crictl installed successfully" && \ - crictl --version; \ - else \ - echo "Not ppc64le architecture, skipping crictl installation"; \ + rm /tmp/crictl.tar.gz; \ fi - -# Verify crictl is available -RUN command -v crictl >/dev/null 2>&1 && echo "crictl is available" || echo "crictl not found" EOF - # Build and push to local registry - echo "Building custom kicbase image..." cd "${PROJECT_DIR:-.}/scripts/minikube/kicbase" - - podman build -t localhost:5000/kicbase:v0.0.47 . - - echo "Pushing custom image to local registry..." - podman push localhost:5000/kicbase:v0.0.47 --tls-verify=false - - cd - > /dev/null - - echo "✅ Custom kicbase image with crictl ready in local registry" + sudo podman build -t "${custom_image_tag}" . || { + echo "Failed to build custom image" + return 1 + } + sudo podman push "${custom_image_tag}" --tls-verify=false || { + echo "Failed to push to registry" + return 1 + } + cd - >/dev/null + echo "Custom kicbase image ready: ${custom_image_tag}" fi return 0 } @@ -158,15 +144,14 @@ start_minikube_cluster() { rm -rf ~/.minikube/machines/minikube fi - # Delete any existing minikube cluster to start fresh echo "Ensuring clean minikube state..." "${PROJECT_DIR:-.}/bin/minikube" delete 2>/dev/null || true local start_args=("--driver=podman") - # Use custom kicbase image for ppc64le with crictl included if [[ "${ARCH}" == "ppc64le" ]]; then echo "Using custom kicbase image for ppc64le with crictl..." + start_args+=("--base-image=localhost:5000/kicbase:v0.0.47") start_args+=("--insecure-registry=localhost:5000") fi @@ -191,26 +176,17 @@ setup_podman() { # Check if podman is already available if command -v podman &> /dev/null; then echo "✅ Podman already installed" - - # Reset podman if it's in an invalid state - if podman info 2>&1 | grep -q "invalid internal status"; then - echo "Resetting podman due to invalid internal status..." - podman system migrate || true - podman system reset --force || true - fi - else - echo "Installing podman..." - sudo dnf install -y podman - fi - # Configure podman for CI environment - echo "Configuring Podman for ${ARCH} CI environment..." + # Diagnose podman state + echo "=== Podman Diagnostics ===" + echo "User: $(whoami), UID: $(id -u)" + echo "User namespace support: $(cat /proc/self/uid_map 2>/dev/null || echo 'not available')" + echo "Systemctl user status:" + systemctl --user status podman.socket 2>/dev/null || echo "podman.socket not active" + echo "Running 'sudo podman info' command..." 
+ sudo podman info 2>&1 + fi - # Enable lingering for the current user to fix systemd session issues - current_user=$(whoami) - current_uid=$(id -u) - echo "Enabling systemd lingering for user ${current_user} (UID: ${current_uid})" - sudo loginctl enable-linger "${current_uid}" 2>/dev/null || true # Configure podman to use cgroupfs instead of systemd in CI mkdir -p ~/.config/containers @@ -223,11 +199,6 @@ events_logger = "file" cgroup_manager = "cgroupfs" EOF - # Start podman service if not running - systemctl --user enable podman.socket 2>/dev/null || true - systemctl --user start podman.socket 2>/dev/null || true - - echo "✅ Podman configured successfully for CI" } # Setup podman and container runtime @@ -249,23 +220,6 @@ else exit 1 fi -echo "" -echo ">>> Verifying podman installation..." -if command -v podman &> /dev/null; then - podman_version=$(podman --version 2>/dev/null) - echo "✅ Podman installed successfully: ${podman_version}" - - # Check podman info - if podman info &>/dev/null; then - echo "✅ Podman is working correctly" - else - echo "⚠️ Podman may need additional configuration" - fi -else - echo "❌ Podman installation failed - podman command not found" - exit 1 -fi - if [[ "${ARCH}" == "ppc64le" ]]; then echo "" echo ">>> Note: crictl will be patched into the minikube container after startup" @@ -295,7 +249,6 @@ echo "Container Runtime: podman" echo "Minikube Driver: podman" echo "Minikube: Default cluster" echo "Minikube: ${minikube_version}" -echo "Podman: ${podman_version}" echo "CNI: bridge (default)" if [[ "${ARCH}" == "ppc64le" ]]; then echo "Special Config: Custom kicbase image with crictl via local registry" From d72017af3825f5f99393a87e9d40b49037a53aff Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Fri, 1 Aug 2025 17:27:27 +0200 Subject: [PATCH 039/164] handle kube secret for podman --- scripts/funcs/kubernetes | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/scripts/funcs/kubernetes b/scripts/funcs/kubernetes index 5377d8927..d4426ef3c 100644 --- a/scripts/funcs/kubernetes +++ b/scripts/funcs/kubernetes @@ -98,12 +98,25 @@ create_image_registries_secret() { context=$1 namespace=$2 secret_name=$3 + + # Detect the correct config file path based on container runtime + local config_file + if command -v podman &> /dev/null && (podman info &> /dev/null || sudo podman info &> /dev/null); then + # For Podman, use root's auth.json since minikube uses sudo podman + config_file="/root/.config/containers/auth.json" + echo "Using Podman config: ${config_file}" + else + # For Docker, use standard docker config + config_file="${HOME}/.docker/config.json" + echo "Using Docker config: ${config_file}" + fi + # shellcheck disable=SC2154 if kubectl --context "${context}" get namespace "${namespace}"; then kubectl --context "${context}" -n "${namespace}" delete secret "${secret_name}" --ignore-not-found echo "${context}: Creating ${namespace}/${secret_name} pull secret" kubectl --context "${context}" -n "${namespace}" create secret generic "${secret_name}" \ - --from-file=.dockerconfigjson="${HOME}/.docker/config.json" --type=kubernetes.io/dockerconfigjson + --from-file=.dockerconfigjson="${config_file}" --type=kubernetes.io/dockerconfigjson else echo "Skipping creating pull secret in ${context}/${namespace}. The namespace doesn't exist yet." 
fi From 02073269560d6b9413690a3e08aea7f24f57562d Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Fri, 1 Aug 2025 17:38:59 +0200 Subject: [PATCH 040/164] handle path --- scripts/dev/configure_docker_auth.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/dev/configure_docker_auth.sh b/scripts/dev/configure_docker_auth.sh index 4edf68817..a36896970 100755 --- a/scripts/dev/configure_docker_auth.sh +++ b/scripts/dev/configure_docker_auth.sh @@ -87,7 +87,7 @@ container_login() { local registry="$2" if [[ "${CONTAINER_RUNTIME}" == "podman" ]]; then - sudo podman login --username "${username}" --password-stdin "${registry}" + sudo podman login --authfile "${CONFIG_PATH}" --username "${username}" --password-stdin "${registry}" else docker login --username "${username}" --password-stdin "${registry}" fi From 0bd74d88434f361420c2f29da20b304ef6351155 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Fri, 1 Aug 2025 18:05:12 +0200 Subject: [PATCH 041/164] handle auth issue for secret --- scripts/funcs/kubernetes | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/scripts/funcs/kubernetes b/scripts/funcs/kubernetes index d4426ef3c..11250422d 100644 --- a/scripts/funcs/kubernetes +++ b/scripts/funcs/kubernetes @@ -101,10 +101,17 @@ create_image_registries_secret() { # Detect the correct config file path based on container runtime local config_file + local temp_config_file="" if command -v podman &> /dev/null && (podman info &> /dev/null || sudo podman info &> /dev/null); then # For Podman, use root's auth.json since minikube uses sudo podman config_file="/root/.config/containers/auth.json" echo "Using Podman config: ${config_file}" + + # Create a temporary copy that the current user can read + temp_config_file=$(mktemp) + sudo cp "${config_file}" "${temp_config_file}" + sudo chown "$(whoami):$(whoami)" "${temp_config_file}" + config_file="${temp_config_file}" else # For Docker, use standard docker config config_file="${HOME}/.docker/config.json" @@ -120,6 +127,11 @@ create_image_registries_secret() { else echo "Skipping creating pull secret in ${context}/${namespace}. The namespace doesn't exist yet." 
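        # Why the temp copy above: kubectl runs as the unprivileged CI user and
        # cannot read the root-owned auth.json directly, so a user-owned mktemp
        # copy is handed to --from-file instead, and removed again right after
        # this block.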
fi + + # Clean up temporary file + if [[ -n "${temp_config_file}" ]] && [[ -f "${temp_config_file}" ]]; then + rm -f "${temp_config_file}" + fi } echo "Creating/updating pull secret from docker configured file" From 5cfdd82897e053dc1c641d69a6b72fa34efa00df Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Fri, 1 Aug 2025 19:38:51 +0200 Subject: [PATCH 042/164] use lucian evg and remove debug mode --- scripts/dev/contexts/evg-private-context | 8 ++++++++ scripts/evergreen/e2e/e2e.sh | 2 +- scripts/evergreen/setup_minikube_host.sh | 2 +- scripts/evergreen/teardown_kubernetes_environment.sh | 2 +- 4 files changed, 11 insertions(+), 3 deletions(-) diff --git a/scripts/dev/contexts/evg-private-context b/scripts/dev/contexts/evg-private-context index 8f25842e8..8c1dfd672 100644 --- a/scripts/dev/contexts/evg-private-context +++ b/scripts/dev/contexts/evg-private-context @@ -122,3 +122,11 @@ export cognito_workload_url="${cognito_workload_url}" export cognito_workload_user_id="${cognito_workload_user_id}" export MDB_UPDATE_LICENSES=true + +export BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa" +export REGISTRY="${BASE_REPO_URL}" +export INIT_DATABASE_IMAGE_REPOSITORY="${BASE_REPO_URL}/mongodb-kubernetes-init-database" +export OPERATOR_REGISTRY=${BASE_REPO_URL} +export DATABASE_REGISTRY=${BASE_REPO_URL} +export INIT_DATABASE_REGISTRY=${BASE_REPO_URL} + diff --git a/scripts/evergreen/e2e/e2e.sh b/scripts/evergreen/e2e/e2e.sh index eb6140a22..12f839b86 100755 --- a/scripts/evergreen/e2e/e2e.sh +++ b/scripts/evergreen/e2e/e2e.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -set -Eeoux pipefail +set -Eeou pipefail start_time=$(date +%s) diff --git a/scripts/evergreen/setup_minikube_host.sh b/scripts/evergreen/setup_minikube_host.sh index f4f5d643b..cc85dbcd4 100755 --- a/scripts/evergreen/setup_minikube_host.sh +++ b/scripts/evergreen/setup_minikube_host.sh @@ -5,7 +5,7 @@ # Can be run on static hosts for testing and verification source scripts/dev/set_env_context.sh -set -Eeoux pipefail +set -Eeou pipefail echo "==========================================" echo "Setting up minikube host with multi-architecture support" diff --git a/scripts/evergreen/teardown_kubernetes_environment.sh b/scripts/evergreen/teardown_kubernetes_environment.sh index ec0c59966..a9babfbed 100755 --- a/scripts/evergreen/teardown_kubernetes_environment.sh +++ b/scripts/evergreen/teardown_kubernetes_environment.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -set -Eeoux pipefail +set -Eeou pipefail source scripts/dev/set_env_context.sh From 3755fdd4753bb2d5124f19a60f505f56d2c28fcd Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Mon, 4 Aug 2025 10:59:38 +0300 Subject: [PATCH 043/164] Fix conflicts --- docker/mongodb-agent-non-matrix/Dockerfile | 15 +++++---------- 1 file changed, 5 insertions(+), 10 deletions(-) diff --git a/docker/mongodb-agent-non-matrix/Dockerfile b/docker/mongodb-agent-non-matrix/Dockerfile index 03554a21b..52f448b1d 100644 --- a/docker/mongodb-agent-non-matrix/Dockerfile +++ b/docker/mongodb-agent-non-matrix/Dockerfile @@ -35,7 +35,6 @@ FROM registry.access.redhat.com/ubi9/ubi-minimal ARG TARGETARCH COPY --from=tools_downloader "/data/${TARGETARCH}/mongodb_tools.tgz" /tools/mongodb_tools.tgz COPY --from=agent_downloader "/data/${TARGETARCH}/mongodb_agent.tgz" /agent/mongodb_agent.tgz -COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/LICENSE # Replace libcurl-minimal and curl-minimal with the full versions # https://bugzilla.redhat.com/show_bug.cgi?id=1994521 @@ -65,16 +64,12 @@ RUN 
mkdir -p /agent \ && touch /var/log/mongodb-mms-automation/readiness.log \ && chmod ugo+rw /var/log/mongodb-mms-automation/readiness.log - -COPY --from=base /data/mongodb-agent.tar.gz /agent -COPY --from=base /data/mongodb-tools.tgz /agent -COPY --from=base /data/LICENSE /licenses/LICENSE - # Copy scripts to a safe location that won't be overwritten by volume mount -COPY --from=base /opt/scripts/agent-launcher-shim.sh /usr/local/bin/agent-launcher-shim.sh -COPY --from=base /opt/scripts/setup-agent-files.sh /usr/local/bin/setup-agent-files.sh -COPY --from=base /opt/scripts/dummy-probe.sh /usr/local/bin/dummy-probe.sh -COPY --from=base /opt/scripts/dummy-readinessprobe.sh /usr/local/bin/dummy-readinessprobe +COPY ./docker/mongodb-agent/agent-launcher-shim.sh /usr/local/bin/agent-launcher-shim.sh +COPY ./docker/mongodb-agent/setup-agent-files.sh /usr/local/bin/setup-agent-files.sh +COPY ./docker/mongodb-agent/dummy-probe.sh /usr/local/bin/dummy-probe.sh +COPY ./docker/mongodb-agent/dummy-readinessprobe.sh /usr/local/bin/dummy-readinessprobe +COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /licenses/LICENSE RUN tar xfz /agent/mongodb_agent.tgz \ && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ From 1fbb8d5df632af82057bf9505ad694e6a20a2f28 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Mon, 4 Aug 2025 10:07:37 +0200 Subject: [PATCH 044/164] Rename arch -> platform --- scripts/release/atomic_pipeline.py | 39 ++++++++---------------------- 1 file changed, 10 insertions(+), 29 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index f1e643f06..59c8fdf76 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -8,6 +8,7 @@ import shutil from concurrent.futures import ProcessPoolExecutor from copy import copy +from platform import architecture from queue import Queue from typing import Callable, Dict, List, Optional, Tuple, Union @@ -21,7 +22,6 @@ get_supported_operator_versions, ) from scripts.evergreen.release.images_signing import ( - mongodb_artifactory_login, sign_image, verify_signature, ) @@ -35,8 +35,6 @@ TRACER = trace.get_tracer("evergreen-agent") DEFAULT_NAMESPACE = "default" -# TODO: rename architecture -> platform everywhere - def make_list_of_str(value: Union[None, str, List[str]]) -> List[str]: if value is None: @@ -85,14 +83,6 @@ def pipeline_process_image( if dockerfile_args: span.set_attribute("mck.build_args", str(dockerfile_args)) - # TODO use these? - build_options = { - # Will continue building an image if it finds an error. See next comment. - "continue_on_errors": True, - # But will still fail after all the tasks have completed - "fail_on_errors": True, - } - logger.info(f"Dockerfile args: {dockerfile_args}, for image: {image_name}") if not dockerfile_args: @@ -145,8 +135,7 @@ def produce_sbom(args): elif args["platform"] == "amd64": platform = "linux/amd64" else: - # TODO: return here? - logger.error(f"Unrecognized architectures in {args}. Skipping SBOM generation") + raise ValueError(f"Unrecognized platform in {args}. 
Cannot proceed with SBOM generation") generate_sbom(image_pull_spec, platform) @@ -259,11 +248,11 @@ def build_CLI_SBOM(build_configuration: BuildConfiguration): return if build_configuration.platforms is None or len(build_configuration.platforms) == 0: - architectures = ["linux/amd64", "linux/arm64", "darwin/arm64", "darwin/amd64"] + platforms = ["linux/amd64", "linux/arm64", "darwin/arm64", "darwin/amd64"] elif "arm64" in build_configuration.platforms: - architectures = ["linux/arm64", "darwin/arm64"] + platforms = ["linux/arm64", "darwin/arm64"] elif "amd64" in build_configuration.platforms: - architectures = ["linux/amd64", "darwin/amd64"] + platforms = ["linux/amd64", "darwin/amd64"] else: logger.error(f"Unrecognized architectures {build_configuration.platforms}. Skipping SBOM generation") return @@ -271,8 +260,8 @@ def build_CLI_SBOM(build_configuration: BuildConfiguration): release = load_release_file() version = release["mongodbOperator"] - for architecture in architectures: - generate_sbom_for_cli(version, architecture) + for platform in platforms: + generate_sbom_for_cli(version, platform) def should_skip_arm64(): @@ -383,23 +372,21 @@ def build_image_generic( is_multi_arch: bool = False, ): """ - Build one or more architecture-specific images, then (optionally) + Build one or more platform-specific images, then (optionally) push a manifest and sign the result. """ - # 1) Defaults registry = build_configuration.base_registry args_list = multi_arch_args_list or [extra_args or {}] version = args_list[0].get("version", "") - architectures = [args.get("architecture") for args in args_list] + platforms = [args.get("architecture") for args in args_list] - # 2) Build each arch for base_args in args_list: # merge in the registry without mutating caller’s dict build_args = {**base_args, "quay_registry": registry} logger.debug(f"Build args: {build_args}") - for arch in architectures: + for arch in platforms: logger.debug(f"Building {image_name} for arch={arch}") logger.debug(f"build image generic - registry={registry}") pipeline_process_image( @@ -410,11 +397,6 @@ def build_image_generic( with_sbom=False, ) - # # 3) Multi-arch manifest - # if is_multi_arch: - # create_and_push_manifest(registry + "/" + image_name, version, architectures=architectures) - - # 4) Signing (only on real releases) if build_configuration.sign: sign_image(registry, version) verify_signature(registry, version) @@ -600,7 +582,6 @@ def build_multi_arch_agent_in_sonar( ) -# TODO: Observed rate limiting (429) sometimes for agent builds in patches def build_agent_default_case(build_configuration: BuildConfiguration): """ Build the agent only for the latest operator for patches and operator releases. 
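The arch -> platform rename above standardizes on Docker-style platform strings ("linux/amd64") rather than bare architecture names ("amd64"). A minimal sketch of that convention (illustrative only; the helper below is not part of the pipeline):

    def to_platform(arch: str, os_name: str = "linux") -> str:
        """Map a bare architecture name onto a Docker-style platform string."""
        return arch if "/" in arch else f"{os_name}/{arch}"

    assert to_platform("amd64") == "linux/amd64"
    assert to_platform("darwin/arm64") == "darwin/arm64"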
From 7c0cfbd3e2ba1dfeb17d1f3beec0c59a464730be Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Mon, 4 Aug 2025 11:07:17 +0200 Subject: [PATCH 045/164] fix z and workdir setup --- .evergreen-functions.yml | 15 ++++++++------- scripts/evergreen/setup_jq.sh | 5 ++--- scripts/evergreen/setup_minikube_host.sh | 2 +- scripts/funcs/printing | 23 +++++++++++++++++++++++ 4 files changed, 34 insertions(+), 11 deletions(-) diff --git a/.evergreen-functions.yml b/.evergreen-functions.yml index c67634152..8cf892a2b 100644 --- a/.evergreen-functions.yml +++ b/.evergreen-functions.yml @@ -51,6 +51,13 @@ functions: ### Setup Functions ### + setup_jq: &setup_jq + command: subprocess.exec + type: setup + params: + working_dir: src/github.com/mongodb/mongodb-kubernetes + binary: scripts/evergreen/setup_jq.sh + setup_context: &setup_context # Running the first switch is important to fill the workdir and other important initial env vars command: shell.exec type: setup @@ -103,6 +110,7 @@ functions: type: setup params: command: "git config --global user.email 'kubernetes-hosted-team@mongodb.com'" + - *setup_jq # we need jq in the context - *setup_context setup_kubectl: &setup_kubectl @@ -112,13 +120,6 @@ functions: working_dir: src/github.com/mongodb/mongodb-kubernetes binary: scripts/evergreen/setup_kubectl.sh - setup_jq: &setup_jq - command: subprocess.exec - type: setup - params: - working_dir: src/github.com/mongodb/mongodb-kubernetes - binary: scripts/evergreen/setup_jq.sh - setup_shellcheck: command: subprocess.exec type: setup diff --git a/scripts/evergreen/setup_jq.sh b/scripts/evergreen/setup_jq.sh index 1f260883a..8905d4d46 100755 --- a/scripts/evergreen/setup_jq.sh +++ b/scripts/evergreen/setup_jq.sh @@ -7,14 +7,13 @@ set -Eeou pipefail -source scripts/dev/set_env_context.sh source scripts/funcs/install # Detect and map architecture for jq releases detect_jq_architecture() { local arch arch=$(uname -m) - + case "${arch}" in x86_64) echo "amd64" @@ -38,4 +37,4 @@ detect_jq_architecture() { jq_arch=$(detect_jq_architecture) echo "Detected architecture: $(uname -m), using jq architecture: ${jq_arch}" -download_and_install_binary "${PROJECT_DIR:-.}/bin" jq "https://github.com/stedolan/jq/releases/download/jq-1.8.1/jq-linux-${jq_arch}" +download_and_install_binary "${PROJECT_DIR:-${workdir:-.}}/bin" jq "https://github.com/stedolan/jq/releases/download/jq-1.8.1/jq-linux-${jq_arch}" diff --git a/scripts/evergreen/setup_minikube_host.sh b/scripts/evergreen/setup_minikube_host.sh index cc85dbcd4..f4f5d643b 100755 --- a/scripts/evergreen/setup_minikube_host.sh +++ b/scripts/evergreen/setup_minikube_host.sh @@ -5,7 +5,7 @@ # Can be run on static hosts for testing and verification source scripts/dev/set_env_context.sh -set -Eeou pipefail +set -Eeoux pipefail echo "==========================================" echo "Setting up minikube host with multi-architecture support" diff --git a/scripts/funcs/printing b/scripts/funcs/printing index 0ee4b5ec0..7d55e5af9 100644 --- a/scripts/funcs/printing +++ b/scripts/funcs/printing @@ -21,3 +21,26 @@ prepend() { export RED='\033[0;31m' export NO_COLOR='\033[0m' + +run_setup_step() { + local step_name="$1" + shift + local script_command=("$@") + + echo "" + echo ">>> Running: ${step_name}" + echo ">>> Command: ${script_command[*]}" + + local script_path="${script_command[0]}" + if [[ -f "${script_path}" ]]; then + if "${script_command[@]}"; then + echo "✅ ${step_name} completed successfully" + else + echo "❌ ${step_name} failed" + exit 1 + fi + else + echo "❌ Script not 
found: ${script_path}" + exit 1 + fi +} From 718d3cc409e2f3596ebc322924fce60eb26059b4 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Mon, 4 Aug 2025 11:07:32 +0200 Subject: [PATCH 046/164] fix z and workdir setup --- scripts/funcs/printing | 23 ----------------------- 1 file changed, 23 deletions(-) diff --git a/scripts/funcs/printing b/scripts/funcs/printing index 7d55e5af9..0ee4b5ec0 100644 --- a/scripts/funcs/printing +++ b/scripts/funcs/printing @@ -21,26 +21,3 @@ prepend() { export RED='\033[0;31m' export NO_COLOR='\033[0m' - -run_setup_step() { - local step_name="$1" - shift - local script_command=("$@") - - echo "" - echo ">>> Running: ${step_name}" - echo ">>> Command: ${script_command[*]}" - - local script_path="${script_command[0]}" - if [[ -f "${script_path}" ]]; then - if "${script_command[@]}"; then - echo "✅ ${step_name} completed successfully" - else - echo "❌ ${step_name} failed" - exit 1 - fi - else - echo "❌ Script not found: ${script_path}" - exit 1 - fi -} From e9a524f307046cceb8f41e80599abfb169018005 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Mon, 4 Aug 2025 11:08:45 +0200 Subject: [PATCH 047/164] Don't rely on exception to check for builder existence --- scripts/release/build_images.py | 29 +++++++++++++---------------- 1 file changed, 13 insertions(+), 16 deletions(-) diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py index 823d187b4..4ffcae04c 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build_images.py @@ -1,7 +1,5 @@ # This file is the new Sonar import base64 -import sys -import time from typing import Dict import boto3 @@ -11,7 +9,6 @@ import docker from lib.base_logger import logger -from lib.sonar.sonar import create_ecr_repository from scripts.evergreen.release.images_signing import sign_image, verify_signature @@ -42,7 +39,6 @@ def ecr_login_boto3(region: str, account_id: str): logger.debug(f"ECR login succeeded: {status}") -# TODO: use builders = docker.buildx.list() instead of an exception def ensure_buildx_builder(builder_name: str = "multiarch") -> str: """ Ensures a Docker Buildx builder exists for multi-platform builds. 
@@ -50,8 +46,15 @@ def ensure_buildx_builder(builder_name: str = "multiarch") -> str: :param builder_name: Name for the buildx builder :return: The builder name that was created or reused """ + docker = python_on_whales.docker + existing_builders = docker.buildx.list() + if any(b.name == builder_name for b in existing_builders): + logger.info(f"Builder '{builder_name}' already exists – reusing it.") + docker.buildx.use(builder_name) + return builder_name + try: docker.buildx.create( name=builder_name, @@ -61,14 +64,8 @@ def ensure_buildx_builder(builder_name: str = "multiarch") -> str: ) logger.info(f"Created new buildx builder: {builder_name}") except DockerException as e: - if f'existing instance for "{builder_name}"' in str(e): - logger.info(f"Builder '{builder_name}' already exists – reusing it.") - # Make sure it's the current one: - docker.buildx.use(builder_name) - else: - # Some other failure happened - logger.error(f"Failed to create buildx builder: {e}") - raise + logger.error(f"Failed to create buildx builder: {e}") + raise return builder_name @@ -81,7 +78,7 @@ def build_image( :param tag: Image tag (name:tag) :param dockerfile: Name or relative path of the Dockerfile within `path` - :param path: Build context path (directory with your Dockerfile) + :param path: Build context path (directory with the Dockerfile) :param args: Build arguments dictionary :param push: Whether to push the image after building :param platforms: List of target platforms (e.g., ["linux/amd64", "linux/arm64"]) @@ -106,7 +103,7 @@ def build_image( if len(platforms) > 1: logger.info(f"Multi-platform build for {len(platforms)} architectures") - # We need a special driver to handle multi platform builds + # We need a special driver to handle multi-platform builds builder_name = ensure_buildx_builder("multiarch") # Build the image using buildx @@ -140,8 +137,8 @@ def process_image( build_path: str = ".", push: bool = True, ): - # Login to ECR using boto3 - ecr_login_boto3(region="us-east-1", account_id="268558157000") # TODO: use environment variables + # Login to ECR + ecr_login_boto3(region="us-east-1", account_id="268558157000") docker_registry = f"{base_registry}/{image_name}" image_full_uri = f"{docker_registry}:{image_tag}" From fa6b89918ca2be0d62cd98abed8771c6d63ad40d Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Mon, 4 Aug 2025 11:08:59 +0200 Subject: [PATCH 048/164] Remove unused variables --- scripts/release/atomic_pipeline.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 59c8fdf76..734f3c519 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -8,7 +8,6 @@ import shutil from concurrent.futures import ProcessPoolExecutor from copy import copy -from platform import architecture from queue import Queue from typing import Callable, Dict, List, Optional, Tuple, Union @@ -232,7 +231,6 @@ def build_database_image(build_configuration: BuildConfiguration): Builds a new database image. 
""" release = load_release_file() - version = release["databaseImageVersion"] args = {"version": build_configuration.version} build_image_generic( image_name="mongodb-kubernetes-database", @@ -328,8 +326,6 @@ def find_om_url(om_version: str) -> str: def build_init_om_image(build_configuration: BuildConfiguration): - release = load_release_file() - version = release["initOpsManagerVersion"] args = {"version": build_configuration.version} build_image_generic( image_name="mongodb-kubernetes-init-ops-manager", @@ -404,7 +400,6 @@ def build_image_generic( def build_init_appdb(build_configuration: BuildConfiguration): release = load_release_file() - version = release["initAppDbVersion"] base_url = "https://fastdl.mongodb.org/tools/db/" mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) args = {"version": build_configuration.version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} @@ -419,7 +414,6 @@ def build_init_appdb(build_configuration: BuildConfiguration): # TODO: nam static: remove this once static containers becomes the default def build_init_database(build_configuration: BuildConfiguration): release = load_release_file() - version = release["initDatabaseVersion"] # comes from release.json base_url = "https://fastdl.mongodb.org/tools/db/" mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) args = {"version": build_configuration.version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} @@ -576,7 +570,7 @@ def build_multi_arch_agent_in_sonar( build_image_generic( image_name="mongodb-agent-ubi", dockerfile_path="docker/mongodb-agent-non-matrix/Dockerfile", - build_configuration=build_config_copy, #TODO: why ? + build_configuration=build_configuration, is_multi_arch=True, multi_arch_args_list=joined_args, ) From 426e522b8bd023f0726149f677b64d570320d28b Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Mon, 4 Aug 2025 11:09:13 +0200 Subject: [PATCH 049/164] Pre commit --- ...godb-kubernetes.clusterserviceversion.yaml | 33 ++++++++----------- helm_chart/Chart.yaml | 5 ++- 2 files changed, 16 insertions(+), 22 deletions(-) diff --git a/config/manifests/bases/mongodb-kubernetes.clusterserviceversion.yaml b/config/manifests/bases/mongodb-kubernetes.clusterserviceversion.yaml index 84c3455fc..c64c8cc25 100644 --- a/config/manifests/bases/mongodb-kubernetes.clusterserviceversion.yaml +++ b/config/manifests/bases/mongodb-kubernetes.clusterserviceversion.yaml @@ -8,9 +8,9 @@ metadata: certified: "true" containerImage: quay.io/mongodb/mongodb-kubernetes:1.2.0 createdAt: "" - description: The MongoDB Controllers for Kubernetes enable easy deploys of - MongoDB into Kubernetes clusters, using our management, monitoring and - backup platforms, Ops Manager and Cloud Manager. + description: The MongoDB Controllers for Kubernetes enable easy deploys of MongoDB + into Kubernetes clusters, using our management, monitoring and backup platforms, + Ops Manager and Cloud Manager. features.operators.openshift.io/disconnected: "true" features.operators.openshift.io/fips-compliant: "false" features.operators.openshift.io/proxy-aware: "false" @@ -51,8 +51,7 @@ spec: x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text - urn:alm:descriptor:com.tectonic.ui:fieldGroup:ClusterConfiguration - - description: In a Replica Set deployment type, specifies the amount of - members. + - description: In a Replica Set deployment type, specifies the amount of members. 
displayName: Members of a Replica Set path: members x-descriptors: @@ -66,8 +65,7 @@ spec: - description: Project configuration for this deployment displayName: Ops Manager project configuration path: opsManager - - description: Name of the ConfigMap with the configuration for this - project + - description: Name of the ConfigMap with the configuration for this project displayName: Ops Manager Project Configuration path: opsManager.configMapRef.name x-descriptors: @@ -166,8 +164,7 @@ spec: x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text - urn:alm:descriptor:com.tectonic.ui:fieldGroup:ClusterConfiguration - - description: In a Replica Set deployment type, specifies the amount of - members. + - description: In a Replica Set deployment type, specifies the amount of members. displayName: Members of a Replica Set path: members x-descriptors: @@ -181,8 +178,7 @@ spec: - description: Project configuration for this deployment displayName: Ops Manager project configuration path: opsManager - - description: Name of the ConfigMap with the configuration for this - project + - description: Name of the ConfigMap with the configuration for this project displayName: Ops Manager Project Configuration path: opsManager.configMapRef.name x-descriptors: @@ -194,8 +190,8 @@ spec: x-descriptors: - urn:alm:descriptor:com.tectonic.ui:booleanSwitch - urn:alm:descriptor:com.tectonic.ui:fieldGroup:ClusterConfiguration - - description: Optional. Specify whether to duplicate service objects - among different Kubernetes clusters. + - description: Optional. Specify whether to duplicate service objects among + different Kubernetes clusters. displayName: Duplicate Service Objects path: duplicateServiceObjects x-descriptors: @@ -256,8 +252,7 @@ spec: path: passwordSecretKeyRef.name x-descriptors: - urn:alm:descriptor:io.kubernetes:Secret - - displayName: Name of the MongoDB resource to which this user is - associated. + - displayName: Name of the MongoDB resource to which this user is associated. path: mongodbResourceRef.name x-descriptors: - urn:alm:descriptor:io.kubernetes:mongodb @@ -313,8 +308,8 @@ spec: x-descriptors: - urn:alm:descriptor:io.kubernetes:Secret - urn:alm:descriptor:com.tectonic.ui:fieldGroup:OpsManagerConfiguration - - displayName: Secret to enable TLS for Ops Manager allowing it to serve - traffic over HTTPS. + - displayName: Secret to enable TLS for Ops Manager allowing it to serve traffic + over HTTPS. path: security.tls.secretRef.name x-descriptors: - urn:alm:descriptor:io.kubernetes:Secret @@ -324,8 +319,8 @@ spec: x-descriptors: - urn:alm:descriptor:com.tectonic.ui:number - urn:alm:descriptor:com.tectonic.ui:fieldGroup:ApplicationDatabase - - displayName: Secret containing the TLS certificate signed by known or - custom CA. + - displayName: Secret containing the TLS certificate signed by known or custom + CA. path: applicationDatabase.security.tls.secretRef.name x-descriptors: - urn:alm:descriptor:io.kubernetes:Secret diff --git a/helm_chart/Chart.yaml b/helm_chart/Chart.yaml index 65bae41cc..8cfcfd8ef 100644 --- a/helm_chart/Chart.yaml +++ b/helm_chart/Chart.yaml @@ -1,8 +1,7 @@ apiVersion: v2 name: mongodb-kubernetes -description: MongoDB Controllers for Kubernetes translate the human knowledge of - creating a MongoDB instance into a scalable, repeatable, and standardized - method. +description: MongoDB Controllers for Kubernetes translate the human knowledge of creating + a MongoDB instance into a scalable, repeatable, and standardized method. 
version: 1.2.0 kubeVersion: '>=1.16-0' type: application From 69611757376553977aba11f38c0828369d2962b6 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Mon, 4 Aug 2025 11:29:58 +0200 Subject: [PATCH 050/164] include e2e --- .evergreen-functions.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.evergreen-functions.yml b/.evergreen-functions.yml index 8cf892a2b..cec31b3d1 100644 --- a/.evergreen-functions.yml +++ b/.evergreen-functions.yml @@ -54,6 +54,7 @@ functions: setup_jq: &setup_jq command: subprocess.exec type: setup + <<: *e2e_include_expansions_in_env params: working_dir: src/github.com/mongodb/mongodb-kubernetes binary: scripts/evergreen/setup_jq.sh From 689085842d937485819a8287e3c1994d12edae60 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Mon, 4 Aug 2025 13:15:51 +0200 Subject: [PATCH 051/164] Cleanup --- scripts/release/atomic_pipeline.py | 29 ----------------------------- scripts/release/main.py | 5 ++--- 2 files changed, 2 insertions(+), 32 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 734f3c519..c35ce8942 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -710,7 +710,6 @@ def _build_agent_operator( executor: ProcessPoolExecutor, operator_version: str, tasks_queue: Queue, - use_quay: bool = False, ): agent_distro = "rhel9_x86_64" tools_version = agent_version[1] @@ -794,31 +793,3 @@ def gather_latest_agent_versions(release: Dict) -> List[Tuple[str, str]]: agent_versions_to_build.append(("107.0.12.8669-1", "100.10.0")) return sorted(list(set(agent_versions_to_build))) - - -def get_builder_function_for_image_name() -> Dict[str, Callable]: - """Returns a dictionary of image names that can be built.""" - - image_builders = { - "cli": build_CLI_SBOM, - "test": build_tests_image, - "operator": build_operator_image, - "mco-test": build_mco_tests_image, - # TODO: add support to build this per patch - "readiness-probe": build_readiness_probe_image, - "upgrade-hook": build_upgrade_hook_image, - "operator-quick": build_operator_image_patch, - "database": build_database_image, - "agent-pct": build_agent_on_agent_bump, - "agent": build_agent_default_case, - # - # Init images - "init-appdb": build_init_appdb, - "init-database": build_init_database, - "init-ops-manager": build_init_om_image, - # - # Ops Manager image - "ops-manager": build_om_image, - } - - return image_builders diff --git a/scripts/release/main.py b/scripts/release/main.py index 3a7e4a5f5..109a8071c 100644 --- a/scripts/release/main.py +++ b/scripts/release/main.py @@ -41,11 +41,12 @@ ) """ -The goal of main.py, build_configuration.py and build_context.py is to provide a single source of truth for the build +The goal of main.py, build_configuration.py and build_context.py is to provide a single source of truth for the build configuration. All parameters that depend on the the build environment (local dev, evg, etc) should be resolved here and not in the pipeline. 
""" +SUPPORTED_PLATFORMS = ["linux/amd64", "linux/arm64"] def get_builder_function_for_image_name() -> Dict[str, Callable]: """Returns a dictionary of image names that can be built.""" @@ -55,7 +56,6 @@ def get_builder_function_for_image_name() -> Dict[str, Callable]: "test": build_tests_image, "operator": build_operator_image, "mco-test": build_mco_tests_image, - # TODO: add support to build this per patch "readiness-probe": build_readiness_probe_image, "upgrade-hook": build_upgrade_hook_image, "operator-quick": build_operator_image_patch, @@ -170,7 +170,6 @@ def build_config_from_args(args): # Parse platform argument (comma-separated) platforms = [p.strip() for p in args.platform.split(",")] - SUPPORTED_PLATFORMS = ["linux/amd64", "linux/arm64"] if any(p not in SUPPORTED_PLATFORMS for p in platforms): logger.error( f"Unsupported platform in '{args.platform}'. Supported platforms: {', '.join(SUPPORTED_PLATFORMS)}" From aab959236a0b1e0c906ae84a03f2e419574793b4 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Mon, 4 Aug 2025 13:16:00 +0200 Subject: [PATCH 052/164] Correct build envs --- scripts/release/build_context.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/scripts/release/build_context.py b/scripts/release/build_context.py index c083b1f0a..9a0e1ccd4 100644 --- a/scripts/release/build_context.py +++ b/scripts/release/build_context.py @@ -9,9 +9,9 @@ class BuildScenario(str, Enum): """Represents the context in which the build is running.""" - RELEASE = "release" # Official release build from a git tag + RELEASE = "release" # Official release triggered by a git tag PATCH = "patch" # CI build for a patch/pull request - MASTER = "master" # CI build from a merge to the master + STAGING = "staging" # CI build from a merge to the master DEVELOPMENT = "development" # Local build on a developer machine @classmethod @@ -23,15 +23,14 @@ def infer_scenario_from_environment(cls) -> "BuildScenario": patch_id = os.getenv("version_id") if git_tag: - scenario = BuildScenario.RELEASE # TODO: git tag won't trigger the pipeline, only the promotion process + # Release scenario and the git tag will be used for promotion process only + scenario = BuildScenario.RELEASE logger.info(f"Build scenario: {scenario} (git_tag: {git_tag})") elif is_patch: scenario = BuildScenario.PATCH logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})") elif is_evg: - scenario = ( - BuildScenario.MASTER - ) # TODO: MASTER -> Staging + scenario = BuildScenario.STAGING logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})") else: scenario = BuildScenario.DEVELOPMENT @@ -63,7 +62,7 @@ def from_scenario(cls, scenario: BuildScenario) -> "BuildContext": git_tag=git_tag, patch_id=patch_id, signing_enabled=signing_enabled, - version=git_tag or patch_id, # TODO: update this + version=git_tag or patch_id, ) def get_version(self) -> str: @@ -76,7 +75,8 @@ def get_version(self) -> str: def get_base_registry(self) -> str: """Get the base registry URL for the current scenario.""" - if self.scenario == BuildScenario.RELEASE: + # TODO CLOUDP-335471: when working on the promotion process, use the prod registry variable in RELEASE scenario + if self.scenario == BuildScenario.STAGING: return os.environ.get("STAGING_REPO_URL") else: return os.environ.get("BASE_REPO_URL") From 33173bb8cb17f328a5b9538295bff86d73d512ef Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Mon, 4 Aug 2025 13:16:28 +0200 Subject: [PATCH 053/164] Lindt --- scripts/release/main.py | 1 + 1 file changed, 1 
insertion(+)

diff --git a/scripts/release/main.py b/scripts/release/main.py
index 109a8071c..169a81503 100644
--- a/scripts/release/main.py
+++ b/scripts/release/main.py
@@ -48,6 +48,7 @@
 
 SUPPORTED_PLATFORMS = ["linux/amd64", "linux/arm64"]
 
+
 def get_builder_function_for_image_name() -> Dict[str, Callable]:
     """Returns a dictionary of image names that can be built."""
 

From f48c19ea48c960f4e735485f9a8ebabf3a02014f Mon Sep 17 00:00:00 2001
From: Nam Nguyen
Date: Mon, 4 Aug 2025 14:52:32 +0200
Subject: [PATCH 054/164] skip pip install

---
 .evergreen-functions.yml                 |  6 +++++-
 scripts/dev/configure_docker_auth.sh     |  2 +-
 scripts/dev/recreate_python_venv.sh      | 11 +++++++++--
 scripts/evergreen/setup_jq.sh            |  2 +-
 scripts/evergreen/setup_minikube_host.sh |  3 ++-
 5 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/.evergreen-functions.yml b/.evergreen-functions.yml
index cec31b3d1..5d3ad081a 100644
--- a/.evergreen-functions.yml
+++ b/.evergreen-functions.yml
@@ -54,8 +54,10 @@ functions:
   setup_jq: &setup_jq
     command: subprocess.exec
     type: setup
-    <<: *e2e_include_expansions_in_env
     params:
+      <<: *e2e_include_expansions_in_env
+      add_to_path:
+        - ${workdir}/bin
       working_dir: src/github.com/mongodb/mongodb-kubernetes
       binary: scripts/evergreen/setup_jq.sh
 
@@ -63,6 +65,8 @@ functions:
     command: shell.exec
     type: setup
     params:
+      add_to_path:
+        - ${workdir}/bin
       shell: bash
       working_dir: src/github.com/mongodb/mongodb-kubernetes
       <<: *e2e_include_expansions_in_env
diff --git a/scripts/dev/configure_docker_auth.sh b/scripts/dev/configure_docker_auth.sh
index a36896970..761a9ff84 100755
--- a/scripts/dev/configure_docker_auth.sh
+++ b/scripts/dev/configure_docker_auth.sh
@@ -180,7 +180,7 @@ if [[ -n "${COMMUNITY_PRIVATE_PREVIEW_PULLSECRET_DOCKERCONFIGJSON:-}" ]]; then
   config_tmp=$(mktemp)
   echo "${COMMUNITY_PRIVATE_PREVIEW_PULLSECRET_DOCKERCONFIGJSON}" | base64 -d > "${quay_io_auth_file}"
   if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then
-    sudo jq -s '.[0] * .[1]' "${quay_io_auth_file}" "${CONFIG_PATH}" > "${config_tmp}"
+    sudo "${PROJECT_DIR:-.}/bin/jq" -s '.[0] * .[1]' "${quay_io_auth_file}" "${CONFIG_PATH}" > "${config_tmp}"
     sudo mv "${config_tmp}" "${CONFIG_PATH}"
   else
     jq -s '.[0] * .[1]' "${quay_io_auth_file}" "${CONFIG_PATH}" > "${config_tmp}"
diff --git a/scripts/dev/recreate_python_venv.sh b/scripts/dev/recreate_python_venv.sh
index bd8b411db..23a1e28c6 100755
--- a/scripts/dev/recreate_python_venv.sh
+++ b/scripts/dev/recreate_python_venv.sh
@@ -69,8 +69,15 @@ echo "Using python from the following path: ${python_bin}"
 "${python_bin}" -m venv venv
 source venv/bin/activate
 pip install --upgrade pip
-echo "Installing requirements.txt..."
-pip install -r requirements.txt
+
+skip_requirements="${SKIP_INSTALL_REQUIREMENTS:-false}"
+if [[ "${skip_requirements}" != "true" ]]; then
+  echo "Installing requirements.txt..."
+  pip install -r requirements.txt
+else
+  echo "Skipping requirements.txt installation."
+fi
+
 echo "Python venv was recreated successfully."
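# Usage sketch (an assumed invocation, mirroring the caller in
# scripts/evergreen/setup_minikube_host.sh):
#   SKIP_INSTALL_REQUIREMENTS=true scripts/dev/recreate_python_venv.sh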
echo "Current python path: $(which python)" python --version diff --git a/scripts/evergreen/setup_jq.sh b/scripts/evergreen/setup_jq.sh index 8905d4d46..ed09a4dfe 100755 --- a/scripts/evergreen/setup_jq.sh +++ b/scripts/evergreen/setup_jq.sh @@ -37,4 +37,4 @@ detect_jq_architecture() { jq_arch=$(detect_jq_architecture) echo "Detected architecture: $(uname -m), using jq architecture: ${jq_arch}" -download_and_install_binary "${PROJECT_DIR:-${workdir:-.}}/bin" jq "https://github.com/stedolan/jq/releases/download/jq-1.8.1/jq-linux-${jq_arch}" +download_and_install_binary "${PROJECT_DIR:-${workdir}}/bin" jq "https://github.com/stedolan/jq/releases/download/jq-1.8.1/jq-linux-${jq_arch}" diff --git a/scripts/evergreen/setup_minikube_host.sh b/scripts/evergreen/setup_minikube_host.sh index f4f5d643b..2c686aa82 100755 --- a/scripts/evergreen/setup_minikube_host.sh +++ b/scripts/evergreen/setup_minikube_host.sh @@ -38,9 +38,10 @@ run_setup_step() { } # Setup Python environment (needed for AWS CLI pip installation) - export GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=1 +export SKIP_INSTALL_REQUIREMENTS=true run_setup_step "Python Virtual Environment" "scripts/dev/recreate_python_venv.sh" +pip install requests run_setup_step "AWS CLI Setup" "scripts/evergreen/setup_aws.sh" From 74e867ce415ecf865179090b275456c24b305c35 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Mon, 4 Aug 2025 15:30:45 +0200 Subject: [PATCH 055/164] Update Makefile --- Makefile | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/Makefile b/Makefile index 9c45c1c3b..73e43077d 100644 --- a/Makefile +++ b/Makefile @@ -75,13 +75,13 @@ operator: configure-operator build-and-push-operator-image # build-push, (todo) restart database database: aws_login - @ scripts/evergreen/run_python.sh pipeline.py --include database + @ scripts/evergreen/run_python.sh scripts/release/main.py database readiness_probe: aws_login - @ scripts/evergreen/run_python.sh pipeline.py --include readiness-probe + @ scripts/evergreen/run_python.sh scripts/release/main.py readiness-probe upgrade_hook: aws_login - @ scripts/evergreen/run_python.sh pipeline.py --include upgrade-hook + @ scripts/evergreen/run_python.sh scripts/release/main.py upgrade-hook # ensures cluster is up, cleans Kubernetes + OM, build-push-deploy operator, # push-deploy database, create secrets, config map, resources etc @@ -90,7 +90,7 @@ full: build-and-push-images # build-push appdb image appdb: aws_login - @ scripts/evergreen/run_python.sh pipeline.py --include appdb + @ scripts/evergreen/run_python.sh scripts/release/main.py --include appdb # runs the e2e test: make e2e test=e2e_sharded_cluster_pv. The Operator is redeployed before the test, the namespace is cleaned. 
# The e2e test image is built and pushed together with all main ones (operator, database, init containers)
@@ -154,19 +154,19 @@ aws_cleanup:
 	@ scripts/evergreen/prepare_aws.sh
 
 build-and-push-operator-image: aws_login
-	@ scripts/evergreen/run_python.sh pipeline.py --include operator-quick
+	@ scripts/evergreen/run_python.sh scripts/release/main.py operator-quick
 
 build-and-push-database-image: aws_login
 	@ scripts/dev/build_push_database_image
 
 build-and-push-test-image: aws_login build-multi-cluster-binary
 	@ if [[ -z "$(local)" ]]; then \
-		scripts/evergreen/run_python.sh pipeline.py --include test; \
+		scripts/evergreen/run_python.sh scripts/release/main.py test; \
 	fi
 
 build-and-push-mco-test-image: aws_login
 	@ if [[ -z "$(local)" ]]; then \
-		scripts/evergreen/run_python.sh pipeline.py --include mco-test; \
+		scripts/evergreen/run_python.sh scripts/release/main.py mco-test; \
 	fi
 
 build-multi-cluster-binary:
@@ -181,27 +181,27 @@ build-and-push-images: build-and-push-operator-image appdb-init-image om-init-im
 build-and-push-init-images: appdb-init-image om-init-image database-init-image
 
 database-init-image:
-	@ scripts/evergreen/run_python.sh pipeline.py --include init-database
+	@ scripts/evergreen/run_python.sh scripts/release/main.py init-database
 
 appdb-init-image:
-	@ scripts/evergreen/run_python.sh pipeline.py --include init-appdb
+	@ scripts/evergreen/run_python.sh scripts/release/main.py init-appdb
 
 # Not setting a parallel-factor will default to 0 which will lead to using all CPUs, that can cause docker to die.
 # Here we are defaulting to 6, a higher value might work for you.
 agent-image:
-	@ scripts/evergreen/run_python.sh pipeline.py --include agent --all-agents --parallel --parallel-factor 6
+	@ scripts/evergreen/run_python.sh scripts/release/main.py --all-agents --parallel --parallel-factor 6 agent
 
 agent-image-slow:
-	@ scripts/evergreen/run_python.sh pipeline.py --include agent --parallel-factor 1
+	@ scripts/evergreen/run_python.sh scripts/release/main.py --parallel-factor 1 agent
 
 operator-image:
-	@ scripts/evergreen/run_python.sh pipeline.py --include operator
+	@ scripts/evergreen/run_python.sh scripts/release/main.py operator
 
 om-init-image:
-	@ scripts/evergreen/run_python.sh pipeline.py --include init-ops-manager
+	@ scripts/evergreen/run_python.sh scripts/release/main.py init-ops-manager
 
 om-image:
-	@ scripts/evergreen/run_python.sh pipeline.py --include ops-manager
+	@ scripts/evergreen/run_python.sh scripts/release/main.py ops-manager
 
 configure-operator:
 	@ scripts/dev/configure_operator.sh
 
From b13b054f937a47e5d3335d7008c53e82b98f08ef Mon Sep 17 00:00:00 2001
From: Julien Benhaim
Date: Mon, 4 Aug 2025 15:32:25 +0200
Subject: [PATCH 056/164] Add TODO

---
 scripts/release/atomic_pipeline.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py
index c35ce8942..93f5492cf 100755
--- a/scripts/release/atomic_pipeline.py
+++ b/scripts/release/atomic_pipeline.py
@@ -390,7 +390,7 @@ def build_image_generic(
         dockerfile_path=dockerfile_path,
         build_configuration=build_configuration,
         dockerfile_args=build_args,
-        with_sbom=False,
+        with_sbom=False,  # TODO: specify no SBOM, write follow-up tasks and TODOs
     )
 
     if build_configuration.sign:
@@ -614,7 +614,6 @@ def build_agent_default_case(build_configuration: BuildConfiguration):
         executor,
         build_configuration.version,
         tasks_queue,
-        build_configuration.scenario == BuildScenario.RELEASE,
     )
 
     queue_exception_handling(tasks_queue)

From
832ce61c41571ee7f2a32d8bfa37324bbe51400f Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Mon, 4 Aug 2025 15:42:40 +0200 Subject: [PATCH 057/164] Revert "Pre commit" This reverts commit 426e522b8bd023f0726149f677b64d570320d28b. --- ...godb-kubernetes.clusterserviceversion.yaml | 33 +++++++++++-------- helm_chart/Chart.yaml | 5 +-- 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/config/manifests/bases/mongodb-kubernetes.clusterserviceversion.yaml b/config/manifests/bases/mongodb-kubernetes.clusterserviceversion.yaml index c64c8cc25..84c3455fc 100644 --- a/config/manifests/bases/mongodb-kubernetes.clusterserviceversion.yaml +++ b/config/manifests/bases/mongodb-kubernetes.clusterserviceversion.yaml @@ -8,9 +8,9 @@ metadata: certified: "true" containerImage: quay.io/mongodb/mongodb-kubernetes:1.2.0 createdAt: "" - description: The MongoDB Controllers for Kubernetes enable easy deploys of MongoDB - into Kubernetes clusters, using our management, monitoring and backup platforms, - Ops Manager and Cloud Manager. + description: The MongoDB Controllers for Kubernetes enable easy deploys of + MongoDB into Kubernetes clusters, using our management, monitoring and + backup platforms, Ops Manager and Cloud Manager. features.operators.openshift.io/disconnected: "true" features.operators.openshift.io/fips-compliant: "false" features.operators.openshift.io/proxy-aware: "false" @@ -51,7 +51,8 @@ spec: x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text - urn:alm:descriptor:com.tectonic.ui:fieldGroup:ClusterConfiguration - - description: In a Replica Set deployment type, specifies the amount of members. + - description: In a Replica Set deployment type, specifies the amount of + members. displayName: Members of a Replica Set path: members x-descriptors: @@ -65,7 +66,8 @@ spec: - description: Project configuration for this deployment displayName: Ops Manager project configuration path: opsManager - - description: Name of the ConfigMap with the configuration for this project + - description: Name of the ConfigMap with the configuration for this + project displayName: Ops Manager Project Configuration path: opsManager.configMapRef.name x-descriptors: @@ -164,7 +166,8 @@ spec: x-descriptors: - urn:alm:descriptor:com.tectonic.ui:text - urn:alm:descriptor:com.tectonic.ui:fieldGroup:ClusterConfiguration - - description: In a Replica Set deployment type, specifies the amount of members. + - description: In a Replica Set deployment type, specifies the amount of + members. displayName: Members of a Replica Set path: members x-descriptors: @@ -178,7 +181,8 @@ spec: - description: Project configuration for this deployment displayName: Ops Manager project configuration path: opsManager - - description: Name of the ConfigMap with the configuration for this project + - description: Name of the ConfigMap with the configuration for this + project displayName: Ops Manager Project Configuration path: opsManager.configMapRef.name x-descriptors: @@ -190,8 +194,8 @@ spec: x-descriptors: - urn:alm:descriptor:com.tectonic.ui:booleanSwitch - urn:alm:descriptor:com.tectonic.ui:fieldGroup:ClusterConfiguration - - description: Optional. Specify whether to duplicate service objects among - different Kubernetes clusters. + - description: Optional. Specify whether to duplicate service objects + among different Kubernetes clusters. 
displayName: Duplicate Service Objects
         path: duplicateServiceObjects
         x-descriptors:
@@ -252,7 +256,8 @@ spec:
         path: passwordSecretKeyRef.name
         x-descriptors:
         - urn:alm:descriptor:io.kubernetes:Secret
-      - displayName: Name of the MongoDB resource to which this user is associated.
+      - displayName: Name of the MongoDB resource to which this user is
+          associated.
         path: mongodbResourceRef.name
         x-descriptors:
         - urn:alm:descriptor:io.kubernetes:mongodb
@@ -308,8 +313,8 @@ spec:
         x-descriptors:
         - urn:alm:descriptor:io.kubernetes:Secret
         - urn:alm:descriptor:com.tectonic.ui:fieldGroup:OpsManagerConfiguration
-      - displayName: Secret to enable TLS for Ops Manager allowing it to serve traffic
-          over HTTPS.
+      - displayName: Secret to enable TLS for Ops Manager allowing it to serve
+          traffic over HTTPS.
         path: security.tls.secretRef.name
         x-descriptors:
         - urn:alm:descriptor:io.kubernetes:Secret
@@ -319,8 +324,8 @@ spec:
         x-descriptors:
         - urn:alm:descriptor:com.tectonic.ui:number
         - urn:alm:descriptor:com.tectonic.ui:fieldGroup:ApplicationDatabase
-      - displayName: Secret containing the TLS certificate signed by known or custom
-          CA.
+      - displayName: Secret containing the TLS certificate signed by known or
+          custom CA.
         path: applicationDatabase.security.tls.secretRef.name
         x-descriptors:
         - urn:alm:descriptor:io.kubernetes:Secret
diff --git a/helm_chart/Chart.yaml b/helm_chart/Chart.yaml
index 8cfcfd8ef..65bae41cc 100644
--- a/helm_chart/Chart.yaml
+++ b/helm_chart/Chart.yaml
@@ -1,7 +1,8 @@
 apiVersion: v2
 name: mongodb-kubernetes
-description: MongoDB Controllers for Kubernetes translate the human knowledge of creating
-  a MongoDB instance into a scalable, repeatable, and standardized method.
+description: MongoDB Controllers for Kubernetes translate the human knowledge of
+  creating a MongoDB instance into a scalable, repeatable, and standardized
+  method.
 version: 1.2.0
 kubeVersion: '>=1.16-0'
 type: application

From ff1a5bcb9128a36114215595f201600db7fb864b Mon Sep 17 00:00:00 2001
From: Nam Nguyen
Date: Mon, 4 Aug 2025 17:14:00 +0200
Subject: [PATCH 058/164] fix python installation and wrong busybox

---
 scripts/dev/recreate_python_venv.sh                   | 1 +
 .../test-app/templates/mongodb-enterprise-tests.yaml  | 3 +--
 scripts/evergreen/e2e/single_e2e.sh                   | 4 ++--
 scripts/evergreen/setup_minikube_host.sh              | 1 -
 4 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/scripts/dev/recreate_python_venv.sh b/scripts/dev/recreate_python_venv.sh
index 23a1e28c6..68ae7d8e2 100755
--- a/scripts/dev/recreate_python_venv.sh
+++ b/scripts/dev/recreate_python_venv.sh
@@ -76,6 +76,7 @@ if [[ "${skip_requirements}" != "true" ]]; then
   pip install -r requirements.txt
 else
   echo "Skipping requirements.txt installation."
+  pip install requests
 fi
 
 echo "Python venv was recreated successfully."
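The next hunk swaps the busybox keepalive container for registry.k8s.io/pause:3.9, which blocks forever without needing a shell or a `sleep` command. A quick way to check the swapped container after deployment (a sketch reusing the variables from single_e2e.sh):

    kubectl -n "${NAMESPACE}" get pod "${TEST_APP_PODNAME}" \
      -o jsonpath='{.status.containerStatuses[?(@.name=="keepalive")].ready}'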
diff --git a/scripts/evergreen/deployments/test-app/templates/mongodb-enterprise-tests.yaml b/scripts/evergreen/deployments/test-app/templates/mongodb-enterprise-tests.yaml index 67661b29b..7d29f8643 100644 --- a/scripts/evergreen/deployments/test-app/templates/mongodb-enterprise-tests.yaml +++ b/scripts/evergreen/deployments/test-app/templates/mongodb-enterprise-tests.yaml @@ -53,9 +53,8 @@ spec: secretName: test-pod-multi-cluster-config {{ end }} containers: - - image: busybox + - image: registry.k8s.io/pause:3.9 name: keepalive - command: [ "/bin/sh", "-c", "sleep inf" ] volumeMounts: - name: results mountPath: /tmp/results diff --git a/scripts/evergreen/e2e/single_e2e.sh b/scripts/evergreen/e2e/single_e2e.sh index dbb1306da..d63d4bd06 100755 --- a/scripts/evergreen/e2e/single_e2e.sh +++ b/scripts/evergreen/e2e/single_e2e.sh @@ -213,8 +213,8 @@ run_tests() { echo # We need to make sure to access this file after the test has finished - kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/results/myreport.xml logs/myreport.xml - kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/diagnostics logs + kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/results/myreport.xml logs/myreport.xml -c mongodb-enterprise-operator-tests + kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/diagnostics logs -c mongodb-enterprise-operator-tests status="$(kubectl --context "${test_pod_context}" get pod "${TEST_APP_PODNAME}" -n "${NAMESPACE}" -o jsonpath="{ .status }" | jq -r '.containerStatuses[] | select(.name == "mongodb-enterprise-operator-tests")'.state.terminated.reason)" [[ "${status}" == "Completed" ]] diff --git a/scripts/evergreen/setup_minikube_host.sh b/scripts/evergreen/setup_minikube_host.sh index 2c686aa82..1cd174f9b 100755 --- a/scripts/evergreen/setup_minikube_host.sh +++ b/scripts/evergreen/setup_minikube_host.sh @@ -41,7 +41,6 @@ run_setup_step() { export GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=1 export SKIP_INSTALL_REQUIREMENTS=true run_setup_step "Python Virtual Environment" "scripts/dev/recreate_python_venv.sh" -pip install requests run_setup_step "AWS CLI Setup" "scripts/evergreen/setup_aws.sh" From 4aa33d9001ee5189c5c766c08a080976e13b6fc8 Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Mon, 4 Aug 2025 18:48:12 +0300 Subject: [PATCH 059/164] Fix contexts for smoke tests --- scripts/dev/contexts/e2e_smoke_ibm_power | 4 ++-- scripts/dev/contexts/e2e_smoke_ibm_z | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/dev/contexts/e2e_smoke_ibm_power b/scripts/dev/contexts/e2e_smoke_ibm_power index 4ba998050..b5e8a1368 100644 --- a/scripts/dev/contexts/e2e_smoke_ibm_power +++ b/scripts/dev/contexts/e2e_smoke_ibm_power @@ -14,6 +14,6 @@ export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${script_dir}"/../../../.evergreen.yml | awk '{print $3}') export CUSTOM_OM_VERSION -export CUSTOM_MDB_VERSION=6.0.5 -export CUSTOM_MDB_PREV_VERSION=5.0.7 +export CUSTOM_MDB_VERSION=6.0.5-ent +export CUSTOM_MDB_PREV_VERSION=5.0.7-ent export KUBE_ENVIRONMENT_NAME=minikube diff --git a/scripts/dev/contexts/e2e_smoke_ibm_z b/scripts/dev/contexts/e2e_smoke_ibm_z index 4ba998050..b5e8a1368 100644 --- a/scripts/dev/contexts/e2e_smoke_ibm_z +++ b/scripts/dev/contexts/e2e_smoke_ibm_z @@ -14,6 +14,6 @@ export 
OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${script_dir}"/../../../.evergreen.yml | awk '{print $3}') export CUSTOM_OM_VERSION -export CUSTOM_MDB_VERSION=6.0.5 -export CUSTOM_MDB_PREV_VERSION=5.0.7 +export CUSTOM_MDB_VERSION=6.0.5-ent +export CUSTOM_MDB_PREV_VERSION=5.0.7-ent export KUBE_ENVIRONMENT_NAME=minikube From cc1f99dba158be7bb8b1ff25cb316815673cd7ed Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Mon, 4 Aug 2025 17:48:20 +0200 Subject: [PATCH 060/164] fix contexts --- scripts/dev/contexts/e2e_smoke_ibm_power | 1 + scripts/dev/contexts/e2e_smoke_ibm_z | 1 + scripts/evergreen/setup_kubernetes_environment.sh | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/dev/contexts/e2e_smoke_ibm_power b/scripts/dev/contexts/e2e_smoke_ibm_power index 4ba998050..b0712bfad 100644 --- a/scripts/dev/contexts/e2e_smoke_ibm_power +++ b/scripts/dev/contexts/e2e_smoke_ibm_power @@ -17,3 +17,4 @@ export CUSTOM_OM_VERSION export CUSTOM_MDB_VERSION=6.0.5 export CUSTOM_MDB_PREV_VERSION=5.0.7 export KUBE_ENVIRONMENT_NAME=minikube +export CLUSTER_TYPE=minikube diff --git a/scripts/dev/contexts/e2e_smoke_ibm_z b/scripts/dev/contexts/e2e_smoke_ibm_z index 4ba998050..b0712bfad 100644 --- a/scripts/dev/contexts/e2e_smoke_ibm_z +++ b/scripts/dev/contexts/e2e_smoke_ibm_z @@ -17,3 +17,4 @@ export CUSTOM_OM_VERSION export CUSTOM_MDB_VERSION=6.0.5 export CUSTOM_MDB_PREV_VERSION=5.0.7 export KUBE_ENVIRONMENT_NAME=minikube +export CLUSTER_TYPE=minikube diff --git a/scripts/evergreen/setup_kubernetes_environment.sh b/scripts/evergreen/setup_kubernetes_environment.sh index 6edaad50d..fb6e2ee00 100755 --- a/scripts/evergreen/setup_kubernetes_environment.sh +++ b/scripts/evergreen/setup_kubernetes_environment.sh @@ -7,7 +7,7 @@ source scripts/funcs/kubernetes # shellcheck disable=SC2154 bindir="${PROJECT_DIR}/bin" -if [[ "${KUBE_ENVIRONMENT_NAME}" == "vanilla" || ("${KUBE_ENVIRONMENT_NAME}" == "multi" && "${CLUSTER_TYPE}" == "kops") ]]; then +if [[ "${KUBE_ENVIRONMENT_NAME}" == "vanilla" || ("${KUBE_ENVIRONMENT_NAME}" == "multi" && "${CLUSTER_TYPE}" == "minikube") ]]; then export AWS_ACCESS_KEY_ID="${mms_eng_test_aws_access_key:?}" export AWS_SECRET_ACCESS_KEY="${mms_eng_test_aws_secret:?}" export AWS_DEFAULT_REGION="${mms_eng_test_aws_region:?}" From dfdad75338c7224ec757e2f8f577c1f82e350d53 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Mon, 4 Aug 2025 19:31:20 +0200 Subject: [PATCH 061/164] don't target one container --- scripts/evergreen/e2e/single_e2e.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/evergreen/e2e/single_e2e.sh b/scripts/evergreen/e2e/single_e2e.sh index d63d4bd06..dbb1306da 100755 --- a/scripts/evergreen/e2e/single_e2e.sh +++ b/scripts/evergreen/e2e/single_e2e.sh @@ -213,8 +213,8 @@ run_tests() { echo # We need to make sure to access this file after the test has finished - kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/results/myreport.xml logs/myreport.xml -c mongodb-enterprise-operator-tests - kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/diagnostics logs -c mongodb-enterprise-operator-tests + kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/results/myreport.xml logs/myreport.xml + kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/diagnostics logs status="$(kubectl --context 
"${test_pod_context}" get pod "${TEST_APP_PODNAME}" -n "${NAMESPACE}" -o jsonpath="{ .status }" | jq -r '.containerStatuses[] | select(.name == "mongodb-enterprise-operator-tests")'.state.terminated.reason)" [[ "${status}" == "Completed" ]] From e9685a8e982f8279ca838a8b1e7e7b548ca25d1e Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Tue, 5 Aug 2025 12:59:17 +0200 Subject: [PATCH 062/164] arm support --- .evergreen.yml | 20 +++++ scripts/dev/contexts/e2e_smoke_arm | 30 ++++++++ scripts/dev/contexts/e2e_smoke_ibm_power | 9 +++ scripts/dev/contexts/e2e_smoke_ibm_z | 9 +++ scripts/dev/contexts/evg-private-context | 8 -- scripts/dev/recreate_python_venv.sh | 93 +++++++++++++----------- scripts/evergreen/setup_kind.sh | 9 ++- 7 files changed, 128 insertions(+), 50 deletions(-) create mode 100644 scripts/dev/contexts/e2e_smoke_arm diff --git a/.evergreen.yml b/.evergreen.yml index 136212703..91eed7475 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -1194,6 +1194,14 @@ task_groups: - e2e_om_ops_manager_backup <<: *teardown_group + - name: e2e_smoke_arm_task_group + max_hosts: -1 + <<: *setup_group + <<: *setup_and_teardown_task + tasks: + - e2e_replica_set + <<: *teardown_group + - name: e2e_smoke_ibm_task_group max_hosts: -1 <<: *setup_group_ibm @@ -1491,6 +1499,18 @@ buildvariants: tasks: - name: e2e_smoke_ibm_task_group + - name: e2e_smoke_arm + display_name: e2e_smoke_arm + tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] + run_on: + - ubuntu2204-arm64-large + allowed_requesters: [ "patch", "github_tag" ] +# depends_on: +# - name: build_test_image +# variant: init_test_run + tasks: + - name: e2e_smoke_arm_task_group + - name: e2e_static_smoke display_name: e2e_static_smoke tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] diff --git a/scripts/dev/contexts/e2e_smoke_arm b/scripts/dev/contexts/e2e_smoke_arm new file mode 100644 index 000000000..64568ed5b --- /dev/null +++ b/scripts/dev/contexts/e2e_smoke_arm @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +set -Eeou pipefail + +script_name=$(readlink -f "${BASH_SOURCE[0]}") +script_dir=$(dirname "${script_name}") + +source "${script_dir}/root-context" + +export ops_manager_version="cloud_qa" + +# This is required to be able to rebuild the om image and use that image which has been rebuild +export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev +CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${script_dir}"/../../../.evergreen.yml | awk '{print $3}') +export CUSTOM_OM_VERSION + +export CUSTOM_MDB_VERSION=6.0.5-ent +export CUSTOM_MDB_PREV_VERSION=5.0.7-ent +export KUBE_ENVIRONMENT_NAME=kind +export CLUSTER_TYPE=kind + + +# TODO: change once we have image building +export BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa" +export REGISTRY="${BASE_REPO_URL}" +export INIT_DATABASE_IMAGE_REPOSITORY="${BASE_REPO_URL}/mongodb-kubernetes-init-database" +export OPERATOR_REGISTRY=${BASE_REPO_URL} +export DATABASE_REGISTRY=${BASE_REPO_URL} +export INIT_DATABASE_REGISTRY=${BASE_REPO_URL} + diff --git a/scripts/dev/contexts/e2e_smoke_ibm_power b/scripts/dev/contexts/e2e_smoke_ibm_power index 842a30fb5..98f1c22df 100644 --- a/scripts/dev/contexts/e2e_smoke_ibm_power +++ b/scripts/dev/contexts/e2e_smoke_ibm_power @@ -18,3 +18,12 @@ export CUSTOM_MDB_VERSION=6.0.5-ent export CUSTOM_MDB_PREV_VERSION=5.0.7-ent export KUBE_ENVIRONMENT_NAME=minikube export CLUSTER_TYPE=minikube + +# TODO: change once we have image building +export 
BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa" +export REGISTRY="${BASE_REPO_URL}" +export INIT_DATABASE_IMAGE_REPOSITORY="${BASE_REPO_URL}/mongodb-kubernetes-init-database" +export OPERATOR_REGISTRY=${BASE_REPO_URL} +export DATABASE_REGISTRY=${BASE_REPO_URL} +export INIT_DATABASE_REGISTRY=${BASE_REPO_URL} + diff --git a/scripts/dev/contexts/e2e_smoke_ibm_z b/scripts/dev/contexts/e2e_smoke_ibm_z index 842a30fb5..98f1c22df 100644 --- a/scripts/dev/contexts/e2e_smoke_ibm_z +++ b/scripts/dev/contexts/e2e_smoke_ibm_z @@ -18,3 +18,12 @@ export CUSTOM_MDB_VERSION=6.0.5-ent export CUSTOM_MDB_PREV_VERSION=5.0.7-ent export KUBE_ENVIRONMENT_NAME=minikube export CLUSTER_TYPE=minikube + +# TODO: change once we have image building +export BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa" +export REGISTRY="${BASE_REPO_URL}" +export INIT_DATABASE_IMAGE_REPOSITORY="${BASE_REPO_URL}/mongodb-kubernetes-init-database" +export OPERATOR_REGISTRY=${BASE_REPO_URL} +export DATABASE_REGISTRY=${BASE_REPO_URL} +export INIT_DATABASE_REGISTRY=${BASE_REPO_URL} + diff --git a/scripts/dev/contexts/evg-private-context b/scripts/dev/contexts/evg-private-context index 8c1dfd672..8f25842e8 100644 --- a/scripts/dev/contexts/evg-private-context +++ b/scripts/dev/contexts/evg-private-context @@ -122,11 +122,3 @@ export cognito_workload_url="${cognito_workload_url}" export cognito_workload_user_id="${cognito_workload_user_id}" export MDB_UPDATE_LICENSES=true - -export BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa" -export REGISTRY="${BASE_REPO_URL}" -export INIT_DATABASE_IMAGE_REPOSITORY="${BASE_REPO_URL}/mongodb-kubernetes-init-database" -export OPERATOR_REGISTRY=${BASE_REPO_URL} -export DATABASE_REGISTRY=${BASE_REPO_URL} -export INIT_DATABASE_REGISTRY=${BASE_REPO_URL} - diff --git a/scripts/dev/recreate_python_venv.sh b/scripts/dev/recreate_python_venv.sh index 68ae7d8e2..36ff1e7f5 100755 --- a/scripts/dev/recreate_python_venv.sh +++ b/scripts/dev/recreate_python_venv.sh @@ -4,54 +4,67 @@ set -Eeou pipefail +source scripts/dev/set_env_context.sh + +install_pyenv() { + if command -v pyenv &> /dev/null; then + echo "pyenv already installed" >&2 + return 0 + fi + + echo "Installing pyenv..." >&2 + + # Install pyenv via the official installer + if curl -s -S -L https://raw.githubusercontent.com/pyenv/pyenv-installer/master/bin/pyenv-installer | bash; then + # Add pyenv to PATH for current session + export PYENV_ROOT="${HOME}/.pyenv" + export PATH="${PYENV_ROOT}/bin:${PATH}" + + # Initialize pyenv in current shell + if command -v pyenv &> /dev/null; then + eval "$(pyenv init --path)" + eval "$(pyenv init -)" + fi + + echo "pyenv installed successfully" >&2 + return 0 + else + echo "Failed to install pyenv" >&2 + return 1 + fi +} + ensure_required_python() { - local required_version="${PYTHON_VERSION:-3.10}" + local required_version="${PYTHON_VERSION:-3.13}" local major_minor major_minor=$(echo "${required_version}" | grep -oE '^[0-9]+\.[0-9]+') - echo "Checking for Python ${required_version} (${major_minor}.x)..." >&2 + echo "Setting up Python ${required_version} (${major_minor}.x)..." 
>&2 - # Check if current python matches required version - if command -v python3 &> /dev/null; then - local version - if version=$(python3 --version 2>&1) && [[ "${version}" == *"Python ${major_minor}"* ]]; then - echo "Found Python ${major_minor}: ${version}" >&2 - echo "python3" - return 0 - else - echo "Current python3 version: ${version}" >&2 - fi + # Always install pyenv first + if ! install_pyenv; then + echo "Error: Failed to install pyenv" >&2 + return 1 fi - # Try to install required Python version using pyenv if available - if command -v pyenv &> /dev/null; then - echo "Python ${major_minor} not found. Attempting to install via pyenv..." >&2 - - # Check if any version in the required series is already installed - if pyenv versions --bare | grep -q "^${major_minor}\."; then - local installed_version - installed_version=$(pyenv versions --bare | grep "^${major_minor}\." | head -1) - echo "Found existing pyenv Python ${major_minor}: ${installed_version}" >&2 - pyenv global "${installed_version}" - echo "python3" - return 0 - fi - - # Install latest version in the required series - local latest_version - latest_version=$(pyenv install --list | grep -E "^[[:space:]]*${major_minor}\.[0-9]+$" | tail -1 | xargs) - if [[ -n "${latest_version}" ]]; then - echo "Installing Python ${latest_version} via pyenv..." >&2 - if pyenv install "${latest_version}"; then - pyenv global "${latest_version}" - echo "python3" - return 0 + # Install latest version in the required series + local latest_version + latest_version=$(pyenv install --list | grep -E "^[[:space:]]*${major_minor}\.[0-9]+$" | tail -1 | xargs) + if [[ -n "${latest_version}" ]]; then + echo "Installing Python ${latest_version} via pyenv..." >&2 + # Use --skip-existing to avoid errors if version already exists + if pyenv install --skip-existing "${latest_version}"; then + pyenv global "${latest_version}" + # Install python3-venv package for Debian/Ubuntu systems if needed + if command -v apt-get &> /dev/null; then + echo "Installing python3-venv package for venv support..." >&2 + sudo apt-get update -qq && sudo apt-get install -y python3-venv || true fi + return 0 fi fi - echo "Error: No suitable Python ${major_minor} installation found and unable to install via pyenv." >&2 - echo "Please ensure Python ${major_minor} is installed or pyenv is available." 
>&2 + echo "Error: Unable to install Python ${major_minor} via pyenv" >&2 return 1 } @@ -62,11 +75,9 @@ if [[ -d "${PROJECT_DIR}"/venv ]]; then fi # Ensure required Python version is available -python_bin=$(ensure_required_python) - -echo "Using python from the following path: ${python_bin}" +ensure_required_python -"${python_bin}" -m venv venv +python3 -m venv venv source venv/bin/activate pip install --upgrade pip diff --git a/scripts/evergreen/setup_kind.sh b/scripts/evergreen/setup_kind.sh index 96b315a78..b8a907404 100755 --- a/scripts/evergreen/setup_kind.sh +++ b/scripts/evergreen/setup_kind.sh @@ -5,12 +5,19 @@ source scripts/dev/set_env_context.sh # Store the lowercase name of Operating System os=$(uname | tr '[:upper:]' '[:lower:]') +# Detect architecture +arch=$(uname -m) +case ${arch} in + x86_64) arch_suffix="amd64" ;; + aarch64|arm64) arch_suffix="arm64" ;; + *) echo "Unsupported architecture: ${arch}" >&2; exit 1 ;; +esac # This should be changed when needed latest_version="v0.27.0" mkdir -p "${PROJECT_DIR}/bin/" echo "Saving kind to ${PROJECT_DIR}/bin" -curl --retry 3 --silent -L "https://github.com/kubernetes-sigs/kind/releases/download/${latest_version}/kind-${os}-amd64" -o kind +curl --retry 3 --silent -L "https://github.com/kubernetes-sigs/kind/releases/download/${latest_version}/kind-${os}-${arch_suffix}" -o kind chmod +x kind sudo mv kind "${PROJECT_DIR}/bin" From df618e553a2a9a1a9a6edce6f9121891b4d3c3be Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Tue, 5 Aug 2025 13:25:50 +0200 Subject: [PATCH 063/164] fix kubectl commands --- .evergreen.yml | 2 +- scripts/dev/install_csi_driver.sh | 1 + scripts/dev/reset_helm.sh | 2 + scripts/dev/switch_context.sh | 12 +++-- scripts/evergreen/e2e/single_e2e.sh | 71 ++++++++++++++++++++++++++++- 5 files changed, 82 insertions(+), 6 deletions(-) diff --git a/.evergreen.yml b/.evergreen.yml index 91eed7475..d2e5def80 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -1197,7 +1197,7 @@ task_groups: - name: e2e_smoke_arm_task_group max_hosts: -1 <<: *setup_group - <<: *setup_and_teardown_task + <<: *setup_and_teardown_task_cloudqa tasks: - e2e_replica_set <<: *teardown_group diff --git a/scripts/dev/install_csi_driver.sh b/scripts/dev/install_csi_driver.sh index 85d365535..d9b3fccc5 100755 --- a/scripts/dev/install_csi_driver.sh +++ b/scripts/dev/install_csi_driver.sh @@ -2,6 +2,7 @@ set -Eeou pipefail +source scripts/dev/set_env_context.sh source scripts/funcs/kubernetes # Path to the deploy script diff --git a/scripts/dev/reset_helm.sh b/scripts/dev/reset_helm.sh index 080cbffb1..15280c56f 100755 --- a/scripts/dev/reset_helm.sh +++ b/scripts/dev/reset_helm.sh @@ -1,5 +1,7 @@ #!/bin/bash +source scripts/dev/set_env_context.sh + # Set Helm release name HELM_RELEASE="mongodb-kubernetes-operator" diff --git a/scripts/dev/switch_context.sh b/scripts/dev/switch_context.sh index 5d07e8fff..e5b25c4e4 100755 --- a/scripts/dev/switch_context.sh +++ b/scripts/dev/switch_context.sh @@ -99,17 +99,23 @@ echo "Generated env files in $(readlink -f "${destination_envs_dir}"):" # shellcheck disable=SC2010 ls -l1 "${destination_envs_dir}" | grep "context" -if which kubectl > /dev/null; then +# Prefer kubectl from bin directory if it exists, otherwise use system kubectl +KUBECTL_CMD="kubectl" +if [[ -n "${PROJECT_DIR:-}" && -x "${PROJECT_DIR}/bin/kubectl" ]]; then + KUBECTL_CMD="${PROJECT_DIR}/bin/kubectl" +fi + +if [[ "$KUBECTL_CMD" != "kubectl" ]] || which kubectl > /dev/null; then if [ "${CLUSTER_NAME-}" ]; then # The convention: the cluster name 
must match the name of kubectl context # We expect this not to be true if kubernetes cluster is still to be created (minikube/kops) - if ! kubectl config use-context "${CLUSTER_NAME}"; then + if ! "$KUBECTL_CMD" config use-context "${CLUSTER_NAME}"; then echo "Warning: failed to switch kubectl context to: ${CLUSTER_NAME}" echo "Does a matching Kubernetes context exist?" fi # Setting the default namespace for current context - kubectl config set-context "$(kubectl config current-context)" "--namespace=${NAMESPACE}" &>/dev/null || true + "$KUBECTL_CMD" config set-context "$("$KUBECTL_CMD" config current-context)" "--namespace=${NAMESPACE}" &>/dev/null || true # shellcheck disable=SC2153 echo "Current context: ${context} (kubectl context: ${CLUSTER_NAME}), namespace=${NAMESPACE}" diff --git a/scripts/evergreen/e2e/single_e2e.sh b/scripts/evergreen/e2e/single_e2e.sh index dbb1306da..25069e01b 100755 --- a/scripts/evergreen/e2e/single_e2e.sh +++ b/scripts/evergreen/e2e/single_e2e.sh @@ -213,8 +213,75 @@ run_tests() { echo # We need to make sure to access this file after the test has finished - kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/results/myreport.xml logs/myreport.xml - kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/diagnostics logs + # The /tmp/results directory is shared between containers via a volume + + echo "Attempting to copy myreport.xml (pytest XML report)..." + + # Try multiple approaches to get the XML file + xml_copied=false + + # Approach 1: Copy from keepalive container (should work since volume is shared) + echo "Attempt 1: Copying from keepalive container..." + if kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/results/myreport.xml logs/myreport.xml -c keepalive 2>/dev/null; then + echo "Successfully copied myreport.xml from keepalive container" + xml_copied=true + else + echo "Failed to copy from keepalive container" + fi + + # Approach 2: Copy from test container (if still available) + if [[ "$xml_copied" == "false" ]]; then + echo "Attempt 2: Copying from test container..." + if kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/results/myreport.xml logs/myreport.xml -c mongodb-enterprise-operator-tests 2>/dev/null; then + echo "Successfully copied myreport.xml from test container" + xml_copied=true + else + echo "Failed to copy from test container" + fi + fi + + # Approach 3: Try to debug and show what files exist + if [[ "$xml_copied" == "false" ]]; then + echo "Attempt 3: Debugging - checking what files exist..." + # Try to list files using the test container first + if kubectl --context "${test_pod_context}" -n "${NAMESPACE}" exec "${TEST_APP_PODNAME}" -c mongodb-enterprise-operator-tests -- ls -la /tmp/results/ 2>/dev/null; then + echo "Files found in /tmp/results/ from test container" + else + echo "Cannot list files from test container (likely terminated)" + fi + + # Try a wildcard copy to get any XML files + echo "Attempting wildcard copy of any XML files..." 
+ kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/results/ logs/tmp_results -c keepalive 2>/dev/null || true + if [[ -d logs/tmp_results ]]; then + echo "Contents of copied results directory:" + ls -la logs/tmp_results/ || true + # Move any XML files to the expected location + find logs/tmp_results/ -name "*.xml" -exec cp {} logs/myreport.xml \; 2>/dev/null && xml_copied=true + rm -rf logs/tmp_results + fi + fi + + if [[ "$xml_copied" == "true" ]]; then + echo "Successfully obtained myreport.xml" + else + echo "Failed to obtain myreport.xml through any method" + fi + + echo "Attempting to copy diagnostics..." + kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/diagnostics logs -c keepalive || true + + # Debug: Check what files were actually copied + echo "Contents of logs directory after copy attempts:" + ls -la logs/ || true + echo "Checking if myreport.xml exists and its size:" + if [[ -f logs/myreport.xml ]]; then + echo "myreport.xml exists, size: $(wc -c < logs/myreport.xml) bytes" + echo "First few lines of myreport.xml:" + head -5 logs/myreport.xml || true + else + echo "myreport.xml does not exist in logs directory" + fi status="$(kubectl --context "${test_pod_context}" get pod "${TEST_APP_PODNAME}" -n "${NAMESPACE}" -o jsonpath="{ .status }" | jq -r '.containerStatuses[] | select(.name == "mongodb-enterprise-operator-tests")'.state.terminated.reason)" [[ "${status}" == "Completed" ]] From 6c9bff6407e6aaa19c5930d405371d4e3a3115a5 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Tue, 5 Aug 2025 15:39:25 +0200 Subject: [PATCH 064/164] add python auth, fix myreport copy, force docker auth --- .evergreen-functions.yml | 4 +- scripts/dev/configure_docker_auth.sh | 144 ++++++++++-------- .../templates/mongodb-enterprise-tests.yaml | 3 +- scripts/evergreen/e2e/single_e2e.sh | 71 +-------- scripts/evergreen/setup_minikube_host.sh | 2 +- 5 files changed, 86 insertions(+), 138 deletions(-) diff --git a/.evergreen-functions.yml b/.evergreen-functions.yml index 5d3ad081a..05e5557cb 100644 --- a/.evergreen-functions.yml +++ b/.evergreen-functions.yml @@ -345,7 +345,6 @@ functions: shell: bash working_dir: src/github.com/mongodb/mongodb-kubernetes script: | - source .generated/context.export.env scripts/evergreen/run_python.sh scripts/evergreen/e2e/setup_cloud_qa.py create # The additional switch is needed, since we now have created the needed OM exports. 
- *switch_context @@ -357,8 +356,7 @@ functions: shell: bash working_dir: src/github.com/mongodb/mongodb-kubernetes script: | - source .generated/context.export.env - scripts/evergreen/e2e/setup_cloud_qa.py delete + scripts/evergreen/run_python.sh scripts/evergreen/e2e/setup_cloud_qa.py delete dump_diagnostic_information_from_all_namespaces: - command: subprocess.exec diff --git a/scripts/dev/configure_docker_auth.sh b/scripts/dev/configure_docker_auth.sh index 761a9ff84..63c0078a8 100755 --- a/scripts/dev/configure_docker_auth.sh +++ b/scripts/dev/configure_docker_auth.sh @@ -8,24 +8,71 @@ source scripts/funcs/checks source scripts/funcs/printing source scripts/funcs/kubernetes -# Detect available container runtime -detect_container_runtime() { - if command -v podman &> /dev/null && (podman info &> /dev/null || sudo podman info &> /dev/null); then - CONTAINER_RUNTIME="podman" - # Use root's auth.json since minikube uses sudo podman - CONFIG_PATH="/root/.config/containers/auth.json" +# Parse command line arguments +CONTAINER_RUNTIME="${1:-docker}" + +# Validate and set up container runtime configuration +setup_container_runtime() { + case "${CONTAINER_RUNTIME}" in + "podman") + if ! command -v podman &> /dev/null; then + echo "Error: Podman is not available but was specified" + exit 1 + fi + USE_SUDO=true + CONFIG_PATH="/root/.config/containers/auth.json" + echo "Using Podman for container authentication (sudo mode)" + ;; + "docker") + if ! command -v docker &> /dev/null; then + echo "Error: Docker is not available but was specified" + exit 1 + fi + USE_SUDO=false + CONFIG_PATH="${HOME}/.docker/config.json" + echo "Using Docker for container authentication" + ;; + *) + echo "Error: Invalid container runtime '${CONTAINER_RUNTIME}'. Must be 'docker' or 'podman'" + exit 1 + ;; + esac + + # Create config directory + if [[ "$USE_SUDO" == "true" ]]; then sudo mkdir -p "$(dirname "${CONFIG_PATH}")" - echo "Using Podman for container authentication (sudo mode)" - return 0 - elif command -v docker &> /dev/null; then - CONTAINER_RUNTIME="docker" - CONFIG_PATH="${HOME}/.docker/config.json" + else mkdir -p "$(dirname "${CONFIG_PATH}")" - echo "Using Docker for container authentication" - return 0 + fi +} + +# Wrapper function to execute commands with or without sudo +exec_cmd() { + if [[ "$USE_SUDO" == "true" ]]; then + sudo env PATH="$PATH" "$@" + else + "$@" + fi +} + +# Wrapper function to read files with or without sudo +read_file() { + local file="$1" + if [[ "$USE_SUDO" == "true" ]]; then + sudo cat "$file" + else + cat "$file" + fi +} + +# Wrapper function to write files with or without sudo +write_file() { + local content="$1" + local file="$2" + if [[ "$USE_SUDO" == "true" ]]; then + echo "$content" | sudo tee "$file" > /dev/null else - echo "Error: Neither Docker nor Podman is available" - exit 1 + echo "$content" > "$file" fi } @@ -59,25 +106,17 @@ check_docker_daemon_is_running() { } remove_element() { - config_option="${1}" - tmpfile=$(mktemp) + local config_option="$1" + local tmpfile=$(mktemp) # Initialize config file if it doesn't exist if [[ ! 
-f "${CONFIG_PATH}" ]]; then - if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then - echo '{}' | sudo tee "${CONFIG_PATH}" > /dev/null - else - echo '{}' > "${CONFIG_PATH}" - fi + write_file '{}' "${CONFIG_PATH}" fi - if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then - sudo "${PROJECT_DIR:-.}/bin/jq" 'del(.'"${config_option}"')' "${CONFIG_PATH}" >"${tmpfile}" - sudo cp "${tmpfile}" "${CONFIG_PATH}" - else - "${PROJECT_DIR:-.}/bin/jq" 'del(.'"${config_option}"')' "${CONFIG_PATH}" >"${tmpfile}" - cp "${tmpfile}" "${CONFIG_PATH}" - fi + # Remove the specified element using jq + exec_cmd jq 'del(.'"${config_option}"')' "${CONFIG_PATH}" > "${tmpfile}" + exec_cmd cp "${tmpfile}" "${CONFIG_PATH}" rm "${tmpfile}" } @@ -87,7 +126,7 @@ container_login() { local registry="$2" if [[ "${CONTAINER_RUNTIME}" == "podman" ]]; then - sudo podman login --authfile "${CONFIG_PATH}" --username "${username}" --password-stdin "${registry}" + exec_cmd podman login --authfile "${CONFIG_PATH}" --username "${username}" --password-stdin "${registry}" else docker login --username "${username}" --password-stdin "${registry}" fi @@ -96,29 +135,20 @@ container_login() { # This is the script which performs container authentication to different registries that we use (so far ECR and RedHat) # As the result of this login the config file will have all the 'auth' information necessary to work with container registries -# Detect container runtime and set appropriate config path -detect_container_runtime +setup_container_runtime check_docker_daemon_is_running # Initialize config file if it doesn't exist if [[ ! -f "${CONFIG_PATH}" ]]; then - if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then - echo '{}' | sudo tee "${CONFIG_PATH}" > /dev/null - else - echo '{}' > "${CONFIG_PATH}" - fi + write_file '{}' "${CONFIG_PATH}" fi if [[ -f "${CONFIG_PATH}" ]]; then if [[ "${RUNNING_IN_EVG:-"false"}" != "true" ]]; then # Check if login is actually required by making a HEAD request to ECR using existing credentials echo "Checking if container registry credentials are valid..." 
- if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then - ecr_auth=$(sudo "${PROJECT_DIR:-.}/bin/jq" -r '.auths."268558157000.dkr.ecr.us-east-1.amazonaws.com".auth // empty' "${CONFIG_PATH}") - else - ecr_auth=$("${PROJECT_DIR:-.}/bin/jq" -r '.auths."268558157000.dkr.ecr.us-east-1.amazonaws.com".auth // empty' "${CONFIG_PATH}") - fi + ecr_auth=$(exec_cmd jq -r '.auths."268558157000.dkr.ecr.us-east-1.amazonaws.com".auth // empty' "${CONFIG_PATH}") if [[ -n "${ecr_auth}" ]]; then http_status=$(curl --head -s -o /dev/null -w "%{http_code}" --max-time 3 "https://268558157000.dkr.ecr.us-east-1.amazonaws.com/v2/dev/mongodb-kubernetes/manifests/latest" \ @@ -138,20 +168,11 @@ if [[ -f "${CONFIG_PATH}" ]]; then # There could be some leftovers on Evergreen (Docker-specific, skip for Podman) if [[ "${CONTAINER_RUNTIME}" == "docker" ]]; then - if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then - if sudo grep -q "credsStore" "${CONFIG_PATH}"; then - remove_element "credsStore" - fi - if sudo grep -q "credHelpers" "${CONFIG_PATH}"; then - remove_element "credHelpers" - fi - else - if grep -q "credsStore" "${CONFIG_PATH}"; then - remove_element "credsStore" - fi - if grep -q "credHelpers" "${CONFIG_PATH}"; then - remove_element "credHelpers" - fi + if exec_cmd grep -q "credsStore" "${CONFIG_PATH}"; then + remove_element "credsStore" + fi + if exec_cmd grep -q "credHelpers" "${CONFIG_PATH}"; then + remove_element "credHelpers" fi fi fi @@ -164,7 +185,7 @@ aws ecr get-login-password --region "us-east-1" | container_login "AWS" "2685581 # by default docker tries to store credentials in an external storage (e.g. OS keychain) - not in the config.json # We need to store it as base64 string in config.json instead so we need to remove the "credsStore" element # This is Docker-specific behavior, Podman stores credentials directly in auth.json -if [[ "${CONTAINER_RUNTIME}" == "docker" ]] && (([[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]] && sudo grep -q "credsStore" "${CONFIG_PATH}") || ([[ "${CONFIG_PATH}" != "/root/.config/containers/auth.json" ]] && grep -q "credsStore" "${CONFIG_PATH}")); then +if [[ "${CONTAINER_RUNTIME}" == "docker" ]] && exec_cmd grep -q "credsStore" "${CONFIG_PATH}"; then remove_element "credsStore" # login again to store the credentials into the config.json @@ -179,13 +200,8 @@ if [[ -n "${COMMUNITY_PRIVATE_PREVIEW_PULLSECRET_DOCKERCONFIGJSON:-}" ]]; then quay_io_auth_file=$(mktemp) config_tmp=$(mktemp) echo "${COMMUNITY_PRIVATE_PREVIEW_PULLSECRET_DOCKERCONFIGJSON}" | base64 -d > "${quay_io_auth_file}" - if [[ "${CONFIG_PATH}" == "/root/.config/containers/auth.json" ]]; then - sudo "${PROJECT_DIR:-.}/bin/jq" -s '.[0] * .[1]' "${quay_io_auth_file}" "${CONFIG_PATH}" > "${config_tmp}" - sudo mv "${config_tmp}" "${CONFIG_PATH}" - else - jq -s '.[0] * .[1]' "${quay_io_auth_file}" "${CONFIG_PATH}" > "${config_tmp}" - mv "${config_tmp}" "${CONFIG_PATH}" - fi + exec_cmd jq -s '.[0] * .[1]' "${quay_io_auth_file}" "${CONFIG_PATH}" > "${config_tmp}" + exec_cmd mv "${config_tmp}" "${CONFIG_PATH}" rm "${quay_io_auth_file}" fi diff --git a/scripts/evergreen/deployments/test-app/templates/mongodb-enterprise-tests.yaml b/scripts/evergreen/deployments/test-app/templates/mongodb-enterprise-tests.yaml index 7d29f8643..0b6e72b1b 100644 --- a/scripts/evergreen/deployments/test-app/templates/mongodb-enterprise-tests.yaml +++ b/scripts/evergreen/deployments/test-app/templates/mongodb-enterprise-tests.yaml @@ -53,8 +53,9 @@ spec: secretName: 
test-pod-multi-cluster-config {{ end }} containers: - - image: registry.k8s.io/pause:3.9 + - image: public.ecr.aws/docker/library/busybox:1.37.0 name: keepalive + command: ["/bin/sh", "-c", "while true; do sleep 3600; done"] volumeMounts: - name: results mountPath: /tmp/results diff --git a/scripts/evergreen/e2e/single_e2e.sh b/scripts/evergreen/e2e/single_e2e.sh index 25069e01b..1adb8476c 100755 --- a/scripts/evergreen/e2e/single_e2e.sh +++ b/scripts/evergreen/e2e/single_e2e.sh @@ -213,75 +213,8 @@ run_tests() { echo # We need to make sure to access this file after the test has finished - # The /tmp/results directory is shared between containers via a volume - - echo "Attempting to copy myreport.xml (pytest XML report)..." - - # Try multiple approaches to get the XML file - xml_copied=false - - # Approach 1: Copy from keepalive container (should work since volume is shared) - echo "Attempt 1: Copying from keepalive container..." - if kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/results/myreport.xml logs/myreport.xml -c keepalive 2>/dev/null; then - echo "Successfully copied myreport.xml from keepalive container" - xml_copied=true - else - echo "Failed to copy from keepalive container" - fi - - # Approach 2: Copy from test container (if still available) - if [[ "$xml_copied" == "false" ]]; then - echo "Attempt 2: Copying from test container..." - if kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/results/myreport.xml logs/myreport.xml -c mongodb-enterprise-operator-tests 2>/dev/null; then - echo "Successfully copied myreport.xml from test container" - xml_copied=true - else - echo "Failed to copy from test container" - fi - fi - - # Approach 3: Try to debug and show what files exist - if [[ "$xml_copied" == "false" ]]; then - echo "Attempt 3: Debugging - checking what files exist..." - # Try to list files using the test container first - if kubectl --context "${test_pod_context}" -n "${NAMESPACE}" exec "${TEST_APP_PODNAME}" -c mongodb-enterprise-operator-tests -- ls -la /tmp/results/ 2>/dev/null; then - echo "Files found in /tmp/results/ from test container" - else - echo "Cannot list files from test container (likely terminated)" - fi - - # Try a wildcard copy to get any XML files - echo "Attempting wildcard copy of any XML files..." - kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/results/ logs/tmp_results -c keepalive 2>/dev/null || true - if [[ -d logs/tmp_results ]]; then - echo "Contents of copied results directory:" - ls -la logs/tmp_results/ || true - # Move any XML files to the expected location - find logs/tmp_results/ -name "*.xml" -exec cp {} logs/myreport.xml \; 2>/dev/null && xml_copied=true - rm -rf logs/tmp_results - fi - fi - - if [[ "$xml_copied" == "true" ]]; then - echo "Successfully obtained myreport.xml" - else - echo "Failed to obtain myreport.xml through any method" - fi - - echo "Attempting to copy diagnostics..." 
- kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/diagnostics logs -c keepalive || true - - # Debug: Check what files were actually copied - echo "Contents of logs directory after copy attempts:" - ls -la logs/ || true - echo "Checking if myreport.xml exists and its size:" - if [[ -f logs/myreport.xml ]]; then - echo "myreport.xml exists, size: $(wc -c < logs/myreport.xml) bytes" - echo "First few lines of myreport.xml:" - head -5 logs/myreport.xml || true - else - echo "myreport.xml does not exist in logs directory" - fi + kubectl --context "${test_pod_context}" -n "${NAMESPACE}" -c keepalive cp "${TEST_APP_PODNAME}":/tmp/results/myreport.xml logs/myreport.xml + kubectl --context "${test_pod_context}" -n "${NAMESPACE}" -c keepalive cp "${TEST_APP_PODNAME}":/tmp/diagnostics logs status="$(kubectl --context "${test_pod_context}" get pod "${TEST_APP_PODNAME}" -n "${NAMESPACE}" -o jsonpath="{ .status }" | jq -r '.containerStatuses[] | select(.name == "mongodb-enterprise-operator-tests")'.state.terminated.reason)" [[ "${status}" == "Completed" ]] diff --git a/scripts/evergreen/setup_minikube_host.sh b/scripts/evergreen/setup_minikube_host.sh index 1cd174f9b..b1d3dd815 100755 --- a/scripts/evergreen/setup_minikube_host.sh +++ b/scripts/evergreen/setup_minikube_host.sh @@ -50,7 +50,7 @@ run_setup_step "jq Setup" "scripts/evergreen/setup_jq.sh" run_setup_step "Minikube Host Setup with Container Runtime Detection" "scripts/minikube/setup_minikube_host.sh" -run_setup_step "Container Registry Authentication" "scripts/dev/configure_docker_auth.sh" +run_setup_step "Container Registry Authentication" "scripts/dev/configure_docker_auth.sh podman" # The minikube cluster is already started by the setup_minikube_host.sh script echo "" From e949a1c83ff45dac911c9b576a22daefd4641bdd Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Tue, 5 Aug 2025 15:47:03 +0200 Subject: [PATCH 065/164] handle pyenv exists --- scripts/dev/recreate_python_venv.sh | 21 ++++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/scripts/dev/recreate_python_venv.sh b/scripts/dev/recreate_python_venv.sh index 36ff1e7f5..6594b3fb2 100755 --- a/scripts/dev/recreate_python_venv.sh +++ b/scripts/dev/recreate_python_venv.sh @@ -7,8 +7,27 @@ set -Eeou pipefail source scripts/dev/set_env_context.sh install_pyenv() { + # Check if pyenv directory exists first + if [[ -d "${HOME}/.pyenv" ]]; then + echo "pyenv directory already exists, setting up environment..." >&2 + export PYENV_ROOT="${HOME}/.pyenv" + export PATH="${PYENV_ROOT}/bin:${PATH}" + + # Initialize pyenv in current shell + if command -v pyenv &> /dev/null; then + eval "$(pyenv init --path)" + eval "$(pyenv init -)" + echo "pyenv already installed and initialized" >&2 + return 0 + else + echo "pyenv directory exists but binary not working, reinstalling..." 
>&2 + rm -rf "${HOME}/.pyenv" + fi + fi + + # Check if pyenv command is available in PATH if command -v pyenv &> /dev/null; then - echo "pyenv already installed" >&2 + echo "pyenv already available in PATH" >&2 return 0 fi From fbb143202d283f2702db496db7493f6cd38bd31f Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Tue, 5 Aug 2025 16:06:36 +0200 Subject: [PATCH 066/164] fix for containerruntime --- scripts/dev/configure_docker_auth.sh | 2 +- scripts/evergreen/setup_minikube_host.sh | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/scripts/dev/configure_docker_auth.sh b/scripts/dev/configure_docker_auth.sh index 63c0078a8..7647fa8a4 100755 --- a/scripts/dev/configure_docker_auth.sh +++ b/scripts/dev/configure_docker_auth.sh @@ -9,7 +9,7 @@ source scripts/funcs/printing source scripts/funcs/kubernetes # Parse command line arguments -CONTAINER_RUNTIME="${1:-docker}" +CONTAINER_RUNTIME="${CONTAINER_RUNTIME-"docker"}" # Validate and set up container runtime configuration setup_container_runtime() { diff --git a/scripts/evergreen/setup_minikube_host.sh b/scripts/evergreen/setup_minikube_host.sh index b1d3dd815..5424ecfb4 100755 --- a/scripts/evergreen/setup_minikube_host.sh +++ b/scripts/evergreen/setup_minikube_host.sh @@ -50,7 +50,8 @@ run_setup_step "jq Setup" "scripts/evergreen/setup_jq.sh" run_setup_step "Minikube Host Setup with Container Runtime Detection" "scripts/minikube/setup_minikube_host.sh" -run_setup_step "Container Registry Authentication" "scripts/dev/configure_docker_auth.sh podman" +export CONTAINER_RUNTIME=podman +run_setup_step "Container Registry Authentication" "scripts/dev/configure_docker_auth.sh" # The minikube cluster is already started by the setup_minikube_host.sh script echo "" From aa9c64a67e640cbfd09e340d0e049b932eee95a9 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Tue, 5 Aug 2025 16:39:56 +0200 Subject: [PATCH 067/164] fix for containerruntime --- scripts/dev/contexts/e2e_smoke_ibm_power | 1 + scripts/dev/contexts/e2e_smoke_ibm_z | 1 + 2 files changed, 2 insertions(+) diff --git a/scripts/dev/contexts/e2e_smoke_ibm_power b/scripts/dev/contexts/e2e_smoke_ibm_power index 98f1c22df..2b1000cb6 100644 --- a/scripts/dev/contexts/e2e_smoke_ibm_power +++ b/scripts/dev/contexts/e2e_smoke_ibm_power @@ -18,6 +18,7 @@ export CUSTOM_MDB_VERSION=6.0.5-ent export CUSTOM_MDB_PREV_VERSION=5.0.7-ent export KUBE_ENVIRONMENT_NAME=minikube export CLUSTER_TYPE=minikube +export CONTAINER_RUNTIME=podman # TODO: change once we have image building export BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa" diff --git a/scripts/dev/contexts/e2e_smoke_ibm_z b/scripts/dev/contexts/e2e_smoke_ibm_z index 98f1c22df..2b1000cb6 100644 --- a/scripts/dev/contexts/e2e_smoke_ibm_z +++ b/scripts/dev/contexts/e2e_smoke_ibm_z @@ -18,6 +18,7 @@ export CUSTOM_MDB_VERSION=6.0.5-ent export CUSTOM_MDB_PREV_VERSION=5.0.7-ent export KUBE_ENVIRONMENT_NAME=minikube export CLUSTER_TYPE=minikube +export CONTAINER_RUNTIME=podman # TODO: change once we have image building export BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa" From bc23827a13c10cd8067dc4bd93f7ced239a9dba4 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 6 Aug 2025 10:32:24 +0200 Subject: [PATCH 068/164] Remove agent unused functions --- scripts/release/atomic_pipeline.py | 123 ----------------------------- scripts/release/main.py | 2 - 2 files changed, 125 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 
93f5492cf..303db2454 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -525,57 +525,6 @@ def build_agent_pipeline( ) -def build_multi_arch_agent_in_sonar( - build_configuration: BuildConfiguration, - image_version, - tools_version, -): - """ - Creates the multi-arch non-operator suffixed version of the agent. - This is a drop-in replacement for the agent - release from MCO. - This should only be called during releases. - Which will lead to a release of the multi-arch - images to quay and ecr. - """ - - logger.info(f"building multi-arch base image for: {image_version}") - args = { - "version": image_version, - "tools_version": tools_version, - } - - arch_arm = { - "agent_distro": "amzn2_aarch64", - "tools_distro": get_tools_distro(tools_version=tools_version)["arm"], - "architecture": "arm64", - } - arch_amd = { - "agent_distro": "rhel9_x86_64", - "tools_distro": get_tools_distro(tools_version=tools_version)["amd"], - "architecture": "amd64", - } - - new_rhel_tool_version = "100.10.0" - if Version(tools_version) >= Version(new_rhel_tool_version): - arch_arm["tools_distro"] = "rhel93-aarch64" - arch_amd["tools_distro"] = "rhel93-x86_64" - - joined_args = [args | arch_amd] - - # Only include arm64 if we shouldn't skip it - if not should_skip_arm64(): - joined_args.append(args | arch_arm) - - build_image_generic( - image_name="mongodb-agent-ubi", - dockerfile_path="docker/mongodb-agent-non-matrix/Dockerfile", - build_configuration=build_configuration, - is_multi_arch=True, - multi_arch_args_list=joined_args, - ) - - def build_agent_default_case(build_configuration: BuildConfiguration): """ Build the agent only for the latest operator for patches and operator releases. @@ -619,78 +568,6 @@ def build_agent_default_case(build_configuration: BuildConfiguration): queue_exception_handling(tasks_queue) -def build_agent_on_agent_bump(build_configuration: BuildConfiguration): - """ - Build the agent matrix (operator version x agent version), triggered by PCT. - - We have three cases where we need to build the agent: - - e2e test runs - - operator releases - - OM/CM bumps via PCT - - We don’t require building a full matrix on e2e test runs and operator releases. - "Operator releases" and "e2e test runs" require only the latest operator x agents - - In OM/CM bumps, we release a new agent which we potentially require to release to older operators as well. - This function takes care of that. - """ - release = load_release_file() - is_release = build_configuration.is_release_step_executed() - - if build_configuration.all_agents: - # We need to release [all agents x latest operator] on operator releases to make e2e tests work - # This was changed previously in https://github.com/mongodb/mongodb-kubernetes/pull/3960 - agent_versions_to_build = gather_all_supported_agent_versions(release) - else: - # we only need to release the latest images, we don't need to re-push old images, as we don't clean them up anymore. 
- agent_versions_to_build = gather_latest_agent_versions(release) - - legacy_agent_versions_to_build = release["supportedImages"]["mongodb-agent"]["versions"] - - tasks_queue = Queue() - max_workers = 1 - if build_configuration.parallel: - max_workers = None - if build_configuration.parallel_factor > 0: - max_workers = build_configuration.parallel_factor - with ProcessPoolExecutor(max_workers=max_workers) as executor: - logger.info(f"running with factor of {max_workers}") - - # We need to regularly push legacy agents, otherwise ecr lifecycle policy will expire them. - # We only need to push them once in a while to ecr, so no quay required - if not is_release: - for legacy_agent in legacy_agent_versions_to_build: - tasks_queue.put( - executor.submit( - build_multi_arch_agent_in_sonar, - build_configuration, - legacy_agent, - # we assume that all legacy agents are build using that tools version - "100.9.4", - ) - ) - - for agent_version in agent_versions_to_build: - # We don't need to keep create and push the same image on every build. - # It is enough to create and push the non-operator suffixed images only during releases to ecr and quay. - if build_configuration.is_release_step_executed() or build_configuration.all_agents: - tasks_queue.put( - executor.submit( - build_multi_arch_agent_in_sonar, - build_configuration, - agent_version[0], - agent_version[1], - ) - ) - for operator_version in get_supported_operator_versions(): - logger.info(f"Building Agent versions: {agent_version} for Operator versions: {operator_version}") - _build_agent_operator( - agent_version, build_configuration, executor, operator_version, tasks_queue, is_release - ) - - queue_exception_handling(tasks_queue) - - def queue_exception_handling(tasks_queue): exceptions_found = False for task in tasks_queue.queue: diff --git a/scripts/release/main.py b/scripts/release/main.py index 169a81503..aa7132488 100644 --- a/scripts/release/main.py +++ b/scripts/release/main.py @@ -20,7 +20,6 @@ from scripts.evergreen.release.images_signing import mongodb_artifactory_login from scripts.release.atomic_pipeline import ( build_agent_default_case, - build_agent_on_agent_bump, build_CLI_SBOM, build_database_image, build_init_appdb, @@ -61,7 +60,6 @@ def get_builder_function_for_image_name() -> Dict[str, Callable]: "upgrade-hook": build_upgrade_hook_image, "operator-quick": build_operator_image_patch, "database": build_database_image, - "agent-pct": build_agent_on_agent_bump, "agent": build_agent_default_case, # # Init images From 03062c32e858b54c173b802dbc75b666ed4f5e3a Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 6 Aug 2025 10:33:03 +0200 Subject: [PATCH 069/164] Remove debug --- scripts/release/build_configuration.py | 1 - scripts/release/main.py | 1 - 2 files changed, 2 deletions(-) diff --git a/scripts/release/build_configuration.py b/scripts/release/build_configuration.py index b62994d0e..a372de08a 100644 --- a/scripts/release/build_configuration.py +++ b/scripts/release/build_configuration.py @@ -15,7 +15,6 @@ class BuildConfiguration: platforms: Optional[List[str]] = None sign: bool = False all_agents: bool = False - debug: bool = True def is_release_step_executed(self) -> bool: return self.scenario == BuildScenario.RELEASE diff --git a/scripts/release/main.py b/scripts/release/main.py index aa7132488..f9f3def9f 100644 --- a/scripts/release/main.py +++ b/scripts/release/main.py @@ -191,7 +191,6 @@ def build_config_from_args(args): version=version, base_registry=registry, parallel=args.parallel, - debug=args.debug, # 
TODO: is debug used ? platforms=platforms, sign=sign, all_agents=all_agents, From 4d4e4e9c2aee6331c4935294fd31d797b096a550 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 6 Aug 2025 10:34:12 +0200 Subject: [PATCH 070/164] Remove args iteration for multi platform --- .../Dockerfile | 3 +- .../Dockerfile | 3 +- scripts/release/atomic_pipeline.py | 71 ++++++------------- 3 files changed, 27 insertions(+), 50 deletions(-) diff --git a/docker/mongodb-kubernetes-readinessprobe/Dockerfile b/docker/mongodb-kubernetes-readinessprobe/Dockerfile index a2f3159b4..7466ece2b 100644 --- a/docker/mongodb-kubernetes-readinessprobe/Dockerfile +++ b/docker/mongodb-kubernetes-readinessprobe/Dockerfile @@ -4,7 +4,8 @@ WORKDIR /go/src ADD . . ARG TARGETARCH -RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} go build -a -o /data/scripts/readinessprobe ./mongodb-community-operator/cmd/readiness/main.go +ARG TARGETOS +RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -a -o /data/scripts/readinessprobe ./mongodb-community-operator/cmd/readiness/main.go FROM registry.access.redhat.com/ubi9/ubi-minimal diff --git a/docker/mongodb-kubernetes-upgrade-hook/Dockerfile b/docker/mongodb-kubernetes-upgrade-hook/Dockerfile index 5005f5801..31aa3a1ac 100644 --- a/docker/mongodb-kubernetes-upgrade-hook/Dockerfile +++ b/docker/mongodb-kubernetes-upgrade-hook/Dockerfile @@ -4,7 +4,8 @@ WORKDIR /go/src ADD . . ARG TARGETARCH -RUN CGO_ENABLED=0 GOOS=linux GOARCH=${TARGETARCH} go build -a -o /data/scripts/version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go +ARG TARGETOS +RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -a -o /data/scripts/version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go FROM registry.access.redhat.com/ubi9/ubi-minimal diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 303db2454..c82d43649 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -364,8 +364,6 @@ def build_image_generic( dockerfile_path: str, build_configuration: BuildConfiguration, extra_args: dict | None = None, - multi_arch_args_list: list[dict] | None = None, - is_multi_arch: bool = False, ): """ Build one or more platform-specific images, then (optionally) @@ -373,25 +371,22 @@ def build_image_generic( """ registry = build_configuration.base_registry - args_list = multi_arch_args_list or [extra_args or {}] - version = args_list[0].get("version", "") - platforms = [args.get("architecture") for args in args_list] - - for base_args in args_list: - # merge in the registry without mutating caller’s dict - build_args = {**base_args, "quay_registry": registry} - logger.debug(f"Build args: {build_args}") - - for arch in platforms: - logger.debug(f"Building {image_name} for arch={arch}") - logger.debug(f"build image generic - registry={registry}") - pipeline_process_image( - image_name=image_name, - dockerfile_path=dockerfile_path, - build_configuration=build_configuration, - dockerfile_args=build_args, - with_sbom=False, # TODO: specify no SBOM, write folllow up tasks and todo - ) + args_list = extra_args or {} + version = args_list.get("version", "") + + # merge in the registry without mutating caller’s dict + build_args = {**args_list, "quay_registry": registry} + logger.debug(f"Build args: {build_args}") + + logger.debug(f"Building {image_name} for platforms={build_configuration.platforms}") + logger.debug(f"build image generic - registry={registry}") + pipeline_process_image( + 
image_name=image_name, + dockerfile_path=dockerfile_path, + build_configuration=build_configuration, + dockerfile_args=build_args, + with_sbom=False, # TODO: specify no SBOM, write folllow up tasks and todo + ) if build_configuration.sign: sign_image(registry, version) @@ -441,41 +436,21 @@ def build_community_image(build_configuration: BuildConfiguration, image_type: s image_name = "mongodb-kubernetes-operator-version-upgrade-post-start-hook" dockerfile_path = "docker/mongodb-kubernetes-upgrade-hook/Dockerfile" else: - raise ValueError(f"Unsupported image type: {image_type}") + raise ValueError(f"Unsupported community image type: {image_type}") version = build_configuration.version golang_version = os.getenv("GOLANG_VERSION", "1.24") - # Use only amd64 if we should skip arm64 builds - if should_skip_arm64(): - platforms = ["linux/amd64"] - logger.info("Skipping ARM64 builds for community image as this is running in EVG pipeline as a patch") - else: - platforms = build_configuration.platforms or ["linux/amd64", "linux/arm64"] - - # Extract architectures from platforms for build args - architectures = [platform.split("/")[-1] for platform in platforms] - multi_arch_args_list = [] - - for arch in architectures: - arch_args = { - "version": version, - "GOLANG_VERSION": golang_version, - "architecture": arch, - "TARGETARCH": arch, # TODO: redundant ? - } - multi_arch_args_list.append(arch_args) - - # Create a copy of build_configuration with overridden platforms - build_config_copy = copy(build_configuration) - build_config_copy.platforms = platforms + extra_args = { + "version": version, + "GOLANG_VERSION": golang_version, + } build_image_generic( image_name=image_name, dockerfile_path=dockerfile_path, - build_configuration=build_config_copy, - multi_arch_args_list=multi_arch_args_list, - is_multi_arch=True, + build_configuration=build_configuration, + extra_args=extra_args, ) From 7c9967a95edf4e30ecfc52e9017b823f9e9dd9bd Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 6 Aug 2025 10:34:48 +0200 Subject: [PATCH 071/164] Cleanup unused stuff --- scripts/release/atomic_pipeline.py | 23 +---------------------- scripts/release/main.py | 4 +--- 2 files changed, 2 insertions(+), 25 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index c82d43649..125f3fcdb 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -9,7 +9,7 @@ from concurrent.futures import ProcessPoolExecutor from copy import copy from queue import Queue -from typing import Callable, Dict, List, Optional, Tuple, Union +from typing import Dict, List, Optional, Tuple, Union import requests import semver @@ -35,14 +35,8 @@ DEFAULT_NAMESPACE = "default" -def make_list_of_str(value: Union[None, str, List[str]]) -> List[str]: - if value is None: - return [] - if isinstance(value, str): - return [e.strip() for e in value.split(",")] - return value def get_tools_distro(tools_version: str) -> Dict[str, str]: @@ -57,11 +51,6 @@ def is_running_in_evg_pipeline(): return os.getenv("RUNNING_IN_EVG", "") == "true" -def is_running_in_patch(): - is_patch = os.environ.get("is_patch") - return is_patch is not None and is_patch.lower() == "true" - - def load_release_file() -> Dict: with open("release.json") as release: return json.load(release) @@ -207,7 +196,6 @@ def build_operator_image(build_configuration: BuildConfiguration): "version": build_configuration.version, "log_automation_config_diff": log_automation_config_diff, "test_suffix": test_suffix, - 
"debug": build_configuration.debug, } logger.info(f"Building Operator args: {args}") @@ -230,7 +218,6 @@ def build_database_image(build_configuration: BuildConfiguration): """ Builds a new database image. """ - release = load_release_file() args = {"version": build_configuration.version} build_image_generic( image_name="mongodb-kubernetes-database", @@ -262,14 +249,6 @@ def build_CLI_SBOM(build_configuration: BuildConfiguration): generate_sbom_for_cli(version, platform) -def should_skip_arm64(): - """ - Determines if arm64 builds should be skipped based on environment. - Returns True if running in Evergreen pipeline as a patch. - """ - return is_running_in_evg_pipeline() and is_running_in_patch() - - @TRACER.start_as_current_span("sign_image_in_repositories") def sign_image_in_repositories(args: Dict[str, str], arch: str = None): span = trace.get_current_span() diff --git a/scripts/release/main.py b/scripts/release/main.py index f9f3def9f..2081c8fab 100644 --- a/scripts/release/main.py +++ b/scripts/release/main.py @@ -1,8 +1,7 @@ import argparse import os import sys -import time -from typing import Callable, Dict, Iterable, List, Optional +from typing import Callable, Dict from opentelemetry import context, trace from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( @@ -17,7 +16,6 @@ from opentelemetry.trace import NonRecordingSpan, SpanContext, TraceFlags from lib.base_logger import logger -from scripts.evergreen.release.images_signing import mongodb_artifactory_login from scripts.release.atomic_pipeline import ( build_agent_default_case, build_CLI_SBOM, From a7c63c9aa6d23b15a7e5937d1e8417669c3ab377 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 6 Aug 2025 10:35:00 +0200 Subject: [PATCH 072/164] Cleanup --- scripts/release/atomic_pipeline.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 125f3fcdb..c3e14a1b3 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -17,9 +17,6 @@ from packaging.version import Version from lib.base_logger import logger -from scripts.evergreen.release.agent_matrix import ( - get_supported_operator_versions, -) from scripts.evergreen.release.images_signing import ( sign_image, verify_signature, @@ -32,7 +29,6 @@ from .optimized_operator_build import build_operator_image_fast TRACER = trace.get_tracer("evergreen-agent") -DEFAULT_NAMESPACE = "default" @@ -507,10 +503,10 @@ def build_agent_default_case(build_configuration: BuildConfiguration): with ProcessPoolExecutor(max_workers=max_workers) as executor: logger.info(f"running with factor of {max_workers}") print(f"======= Versions to build {agent_versions_to_build} =======") - for agent_version in agent_versions_to_build: + for idx, agent_version in enumerate(agent_versions_to_build): # We don't need to keep create and push the same image on every build. # It is enough to create and push the non-operator suffixed images only during releases to ecr and quay. 
- print(f"======= Building Agent {agent_version} =======") + print(f"======= Building Agent {agent_version} ({idx}/{len(agent_versions_to_build)})") _build_agent_operator( agent_version, build_configuration, From 742e784e3063b067bf67f01a00cd8b689920d851 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 6 Aug 2025 10:39:40 +0200 Subject: [PATCH 073/164] Rename file --- scripts/release/{main.py => pipeline_main.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename scripts/release/{main.py => pipeline_main.py} (100%) diff --git a/scripts/release/main.py b/scripts/release/pipeline_main.py similarity index 100% rename from scripts/release/main.py rename to scripts/release/pipeline_main.py From 1f0a21be4776b8bce6c356b2303e6c4d202d39ec Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 6 Aug 2025 10:45:04 +0200 Subject: [PATCH 074/164] Remove cli sbom --- scripts/release/atomic_pipeline.py | 23 ----------------------- scripts/release/pipeline_main.py | 2 -- 2 files changed, 25 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index c3e14a1b3..8aa75fff2 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -222,29 +222,6 @@ def build_database_image(build_configuration: BuildConfiguration): extra_args=args, ) - -def build_CLI_SBOM(build_configuration: BuildConfiguration): - if not is_running_in_evg_pipeline(): - logger.info("Skipping SBOM Generation (enabled only for EVG)") - return - - if build_configuration.platforms is None or len(build_configuration.platforms) == 0: - platforms = ["linux/amd64", "linux/arm64", "darwin/arm64", "darwin/amd64"] - elif "arm64" in build_configuration.platforms: - platforms = ["linux/arm64", "darwin/arm64"] - elif "amd64" in build_configuration.platforms: - platforms = ["linux/amd64", "darwin/amd64"] - else: - logger.error(f"Unrecognized architectures {build_configuration.platforms}. 
Skipping SBOM generation") - return - - release = load_release_file() - version = release["mongodbOperator"] - - for platform in platforms: - generate_sbom_for_cli(version, platform) - - @TRACER.start_as_current_span("sign_image_in_repositories") def sign_image_in_repositories(args: Dict[str, str], arch: str = None): span = trace.get_current_span() diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index 2081c8fab..08a293fdf 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -18,7 +18,6 @@ from lib.base_logger import logger from scripts.release.atomic_pipeline import ( build_agent_default_case, - build_CLI_SBOM, build_database_image, build_init_appdb, build_init_database, @@ -50,7 +49,6 @@ def get_builder_function_for_image_name() -> Dict[str, Callable]: """Returns a dictionary of image names that can be built.""" image_builders = { - "cli": build_CLI_SBOM, "test": build_tests_image, "operator": build_operator_image, "mco-test": build_mco_tests_image, From 813d539e2c936cafe9c9d58fe5d23affd6cc7b37 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 6 Aug 2025 11:02:33 +0200 Subject: [PATCH 075/164] Renamed image building file --- .evergreen-functions.yml | 2 +- Makefile | 28 ++++++++++++++-------------- 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/.evergreen-functions.yml b/.evergreen-functions.yml index a1d2a5539..55bedbafc 100644 --- a/.evergreen-functions.yml +++ b/.evergreen-functions.yml @@ -538,7 +538,7 @@ functions: shell: bash <<: *e2e_include_expansions_in_env working_dir: src/github.com/mongodb/mongodb-kubernetes - binary: scripts/evergreen/run_python.sh scripts/release/main.py --parallel ${image_name} + binary: scripts/evergreen/run_python.sh scripts/release/pipeline_main.py --parallel ${image_name} legacy_pipeline: - *switch_context diff --git a/Makefile b/Makefile index 73e43077d..086c2af70 100644 --- a/Makefile +++ b/Makefile @@ -75,13 +75,13 @@ operator: configure-operator build-and-push-operator-image # build-push, (todo) restart database database: aws_login - @ scripts/evergreen/run_python.sh scripts/release/main.py database + @ scripts/evergreen/run_python.sh scripts/release/pipeline_main.py database readiness_probe: aws_login - @ scripts/evergreen/run_python.sh scripts/release/main.py readiness-probe + @ scripts/evergreen/run_python.sh scripts/release/pipeline_main.py readiness-probe upgrade_hook: aws_login - @ scripts/evergreen/run_python.sh scripts/release/main.py upgrade-hook + @ scripts/evergreen/run_python.sh scripts/release/pipeline_main.py upgrade-hook # ensures cluster is up, cleans Kubernetes + OM, build-push-deploy operator, # push-deploy database, create secrets, config map, resources etc @@ -90,7 +90,7 @@ full: build-and-push-images # build-push appdb image appdb: aws_login - @ scripts/evergreen/run_python.sh scripts/release/main.py --include appdb + @ scripts/evergreen/run_python.sh scripts/release/pipeline_main.py --include appdb # runs the e2e test: make e2e test=e2e_sharded_cluster_pv. The Operator is redeployed before the test, the namespace is cleaned. 
# The e2e test image is built and pushed together with all main ones (operator, database, init containers)
@@ -154,19 +154,19 @@ aws_cleanup:
 	@ scripts/evergreen/prepare_aws.sh
 
 build-and-push-operator-image: aws_login
-	@ scripts/evergreen/run_python.sh scripts/release/main.py operator-quick
+	@ scripts/evergreen/run_python.sh scripts/release/pipeline_main.py operator-quick
 
 build-and-push-database-image: aws_login
 	@ scripts/dev/build_push_database_image
 
 build-and-push-test-image: aws_login build-multi-cluster-binary
 	@ if [[ -z "$(local)" ]]; then \
-		scripts/evergreen/run_python.sh scripts/release/main.py test; \
+		scripts/evergreen/run_python.sh scripts/release/pipeline_main.py test; \
 	fi
 
 build-and-push-mco-test-image: aws_login
 	@ if [[ -z "$(local)" ]]; then \
-		scripts/evergreen/run_python.sh scripts/release/main.py mco-test; \
+		scripts/evergreen/run_python.sh scripts/release/pipeline_main.py mco-test; \
 	fi
 
 build-multi-cluster-binary:
@@ -181,27 +181,27 @@ build-and-push-images: build-and-push-operator-image appdb-init-image om-init-im
 build-and-push-init-images: appdb-init-image om-init-image database-init-image
 
 database-init-image:
-	@ scripts/evergreen/run_python.sh scripts/release/main.py init-database
+	@ scripts/evergreen/run_python.sh scripts/release/pipeline_main.py init-database
 
 appdb-init-image:
-	@ scripts/evergreen/run_python.sh scripts/release/main.py init-appdb
+	@ scripts/evergreen/run_python.sh scripts/release/pipeline_main.py init-appdb
 
 # Not setting a parallel-factor will default to 0 which will lead to using all CPUs, that can cause docker to die.
 # Here we are defaulting to 6, a higher value might work for you.
 agent-image:
-	@ scripts/evergreen/run_python.sh scripts/release/main.py --all-agents --parallel --parallel-factor 6 agent
+	@ scripts/evergreen/run_python.sh scripts/release/pipeline_main.py --all-agents --parallel --parallel-factor 6 agent
 
 agent-image-slow:
-	@ scripts/evergreen/run_python.sh scripts/release/main.py --parallel-factor 1 agent
+	@ scripts/evergreen/run_python.sh scripts/release/pipeline_main.py --parallel-factor 1 agent
 
 operator-image:
-	@ scripts/evergreen/run_python.sh scripts/release/main.py operator
+	@ scripts/evergreen/run_python.sh scripts/release/pipeline_main.py operator
 
 om-init-image:
-	@ scripts/evergreen/run_python.sh scripts/release/main.py init-ops-manager
+	@ scripts/evergreen/run_python.sh scripts/release/pipeline_main.py init-ops-manager
 
 om-image:
-	@ scripts/evergreen/run_python.sh scripts/release/main.py ops-manager
+	@ scripts/evergreen/run_python.sh scripts/release/pipeline_main.py ops-manager
 
 configure-operator:
 	@ scripts/dev/configure_operator.sh

From c06061bcfa47e245d0c4e9456413f8fadf0162bd Mon Sep 17 00:00:00 2001
From: Julien Benhaim
Date: Wed, 6 Aug 2025 11:28:37 +0200
Subject: [PATCH 076/164] Freeze python on whales

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 9461810cd..7264356ae 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -34,7 +34,7 @@ wrapt==1.17.2
 botocore==1.39.4
 boto3==1.39.4
 python-frontmatter==1.1.0
-python-on-whales
+python-on-whales==0.78.0
 
 # from kubeobject
 freezegun==1.5.3

From 5f9d49a1812c78e096938b51ecabd880171c7ff0 Mon Sep 17 00:00:00 2001
From: Julien Benhaim
Date: Wed, 6 Aug 2025 11:29:26 +0200
Subject: [PATCH 077/164] Lint

---
 scripts/release/atomic_pipeline.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py
index 
8aa75fff2..4780e0340 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -222,6 +222,7 @@ def build_database_image(build_configuration: BuildConfiguration): extra_args=args, ) + @TRACER.start_as_current_span("sign_image_in_repositories") def sign_image_in_repositories(args: Dict[str, str], arch: str = None): span = trace.get_current_span() From f390dc9278b460b7e6092b112889c9a67dff975d Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 6 Aug 2025 11:31:08 +0200 Subject: [PATCH 078/164] Remove everything SBOM related --- scripts/release/atomic_pipeline.py | 43 ------------------------------ 1 file changed, 43 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 4780e0340..f61c9689b 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -21,7 +21,6 @@ sign_image, verify_signature, ) -from scripts.evergreen.release.sbom import generate_sbom, generate_sbom_for_cli from .build_configuration import BuildConfiguration from .build_context import BuildScenario @@ -59,7 +58,6 @@ def pipeline_process_image( build_configuration: BuildConfiguration, dockerfile_args: Dict[str, str] = None, build_path: str = ".", - with_sbom: bool = True, ): """Builds a Docker image with arguments defined in `args`.""" span = trace.get_current_span() @@ -83,46 +81,6 @@ def pipeline_process_image( build_path=build_path, ) - if with_sbom: - produce_sbom(dockerfile_args) - - -@TRACER.start_as_current_span("produce_sbom") -def produce_sbom(args): - span = trace.get_current_span() - if not is_running_in_evg_pipeline(): - logger.info("Skipping SBOM Generation (enabled only for EVG)") - return - - try: - image_pull_spec = args["quay_registry"] + args.get("ubi_suffix", "") - except KeyError: - logger.error(f"Could not find image pull spec. Args: {args}") - logger.error(f"Skipping SBOM generation") - return - - try: - image_tag = args["release_version"] - span.set_attribute("mck.release_version", image_tag) - except KeyError: - logger.error(f"Could not find image tag. Args: {args}") - logger.error(f"Skipping SBOM generation") - return - - image_pull_spec = f"{image_pull_spec}:{image_tag}" - print(f"Producing SBOM for image: {image_pull_spec} args: {args}") - - platform = "linux/amd64" - if "platform" in args: - if args["platform"] == "arm64": - platform = "linux/arm64" - elif args["platform"] == "amd64": - platform = "linux/amd64" - else: - raise ValueError(f"Unrecognized platform in {args}. 
Cannot proceed with SBOM generation") - - generate_sbom(image_pull_spec, platform) - def build_tests_image(build_configuration: BuildConfiguration): """ @@ -338,7 +296,6 @@ def build_image_generic( dockerfile_path=dockerfile_path, build_configuration=build_configuration, dockerfile_args=build_args, - with_sbom=False, # TODO: specify no SBOM, write folllow up tasks and todo ) if build_configuration.sign: From a47341d60632b53db39ee91490f9724ba9668a40 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 6 Aug 2025 11:42:42 +0200 Subject: [PATCH 079/164] Lint --- scripts/release/atomic_pipeline.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index f61c9689b..fe29289dd 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -30,10 +30,6 @@ TRACER = trace.get_tracer("evergreen-agent") - - - - def get_tools_distro(tools_version: str) -> Dict[str, str]: new_rhel_tool_version = "100.10.0" default_distro = {"arm": "rhel90-aarch64", "amd": "rhel90-x86_64"} From 972b23c9aeacc347d6d0f5b309858a563253bf7e Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 6 Aug 2025 13:36:24 +0200 Subject: [PATCH 080/164] Add TODO --- .evergreen-functions.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.evergreen-functions.yml b/.evergreen-functions.yml index 55bedbafc..7aec5c34d 100644 --- a/.evergreen-functions.yml +++ b/.evergreen-functions.yml @@ -540,6 +540,7 @@ functions: working_dir: src/github.com/mongodb/mongodb-kubernetes binary: scripts/evergreen/run_python.sh scripts/release/pipeline_main.py --parallel ${image_name} + # TODO: CLOUDP-335471 ; once all image builds are made with the new atomic pipeline, remove the following function legacy_pipeline: - *switch_context - command: shell.exec From 4ae40344da5258a161e3012e4cb9760b3719ec28 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Wed, 6 Aug 2025 13:57:46 +0200 Subject: [PATCH 081/164] Remove --all-agents --- Makefile | 2 +- scripts/release/build_configuration.py | 1 - scripts/release/pipeline_main.py | 10 +--------- 3 files changed, 2 insertions(+), 11 deletions(-) diff --git a/Makefile b/Makefile index 086c2af70..f473918c5 100644 --- a/Makefile +++ b/Makefile @@ -189,7 +189,7 @@ appdb-init-image: # Not setting a parallel-factor will default to 0 which will lead to using all CPUs, that can cause docker to die. # Here we are defaulting to 6, a higher value might work for you. 
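To make the comment above concrete, here is a hedged sketch of the 0-means-all-CPUs rule; resolve_max_workers is an illustrative name, not a helper that exists in this repo:

    import os
    from concurrent.futures import ProcessPoolExecutor

    def resolve_max_workers(parallel_factor: int = 0) -> int:
        # 0 falls back to every CPU, which is what can overwhelm the Docker
        # daemon; the Makefile default of 6 keeps the pool bounded.
        return parallel_factor if parallel_factor > 0 else (os.cpu_count() or 1)

    if __name__ == "__main__":
        with ProcessPoolExecutor(max_workers=resolve_max_workers(6)) as executor:
            list(executor.map(abs, [-1, -2, -3]))  # stand-in for agent build tasks
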
agent-image: - @ scripts/evergreen/run_python.sh scripts/release/pipeline_main.py --all-agents --parallel --parallel-factor 6 agent + @ scripts/evergreen/run_python.sh scripts/release/pipeline_main.py --parallel --parallel-factor 6 agent agent-image-slow: @ scripts/evergreen/run_python.sh scripts/release/pipeline_main.py --parallel-factor 1 agent diff --git a/scripts/release/build_configuration.py b/scripts/release/build_configuration.py index a372de08a..2228a6709 100644 --- a/scripts/release/build_configuration.py +++ b/scripts/release/build_configuration.py @@ -14,7 +14,6 @@ class BuildConfiguration: parallel_factor: int = 0 platforms: Optional[List[str]] = None sign: bool = False - all_agents: bool = False def is_release_step_executed(self) -> bool: return self.scenario == BuildScenario.RELEASE diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index 08a293fdf..5b4cc1195 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -133,13 +133,7 @@ def main(): "--registry", help="Override the base registry instead of resolving from build scenario", ) - - # Agent specific arguments - parser.add_argument( - "--all-agents", - action="store_true", - help="Build all agent variants instead of only the latest.", - ) + # For agent builds parser.add_argument( "--parallel-factor", default=0, @@ -180,7 +174,6 @@ def build_config_from_args(args): version = args.version or build_context.get_version() registry = args.registry or build_context.get_base_registry() sign = args.sign or build_context.signing_enabled - all_agents = args.all_agents or bool(os.environ.get("all_agents", False)) return BuildConfiguration( scenario=scenario, @@ -189,7 +182,6 @@ def build_config_from_args(args): parallel=args.parallel, platforms=platforms, sign=sign, - all_agents=all_agents, parallel_factor=args.parallel_factor, ) From af288079d78dbed667a3f12c6b91d02667c83256 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Thu, 7 Aug 2025 13:33:13 +0200 Subject: [PATCH 082/164] refactoring and cleanups on architecture and minikube handling --- .evergreen-functions.yml | 13 +++-- .evergreen.yml | 2 +- Makefile | 2 +- .../content/agent-launcher-lib.sh | 31 ++++++++---- ...er_auth.sh => configure_container_auth.sh} | 49 ++----------------- scripts/dev/prepare_local_e2e_run.sh | 2 +- scripts/evergreen/setup_aws.sh | 32 ++---------- scripts/evergreen/setup_jq.sh | 29 +---------- scripts/evergreen/setup_kind.sh | 8 +-- scripts/evergreen/setup_kubectl.sh | 27 +--------- scripts/evergreen/setup_minikube_host.sh | 5 +- scripts/funcs/install | 35 +++++++++++++ scripts/minikube/install-minikube.sh | 27 ++-------- scripts/minikube/minikube_host.sh | 21 ++++---- scripts/minikube/setup_minikube_host.sh | 3 +- 15 files changed, 104 insertions(+), 182 deletions(-) rename scripts/dev/{configure_docker_auth.sh => configure_container_auth.sh} (77%) diff --git a/.evergreen-functions.yml b/.evergreen-functions.yml index 05e5557cb..6deabc65c 100644 --- a/.evergreen-functions.yml +++ b/.evergreen-functions.yml @@ -231,7 +231,7 @@ functions: working_dir: src/github.com/mongodb/mongodb-kubernetes add_to_path: - ${workdir}/bin - binary: scripts/dev/configure_docker_auth.sh + binary: scripts/dev/configure_container_auth.sh setup_evg_host: &setup_evg_host command: subprocess.exec @@ -280,10 +280,17 @@ functions: command: scripts/evergreen/setup_minikube_host.sh prune_docker_resources: - - command: subprocess.exec + - command: shell.exec type: setup params: - command: "docker system prune -a -f" + 
shell: bash + script: | + if command -v docker >/dev/null 2>&1; then + echo "Docker found, pruning docker resources..." + docker system prune -a -f + else + echo "Docker not found, skipping docker resource pruning" + fi # the task configures the set of tools necessary for any task working with K8 cluster: # installs kubectl, jq, kind (if necessary), configures docker authentication diff --git a/.evergreen.yml b/.evergreen.yml index d2e5def80..f46e46b5a 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -140,7 +140,7 @@ variables: - &teardown_group teardown_group: - - func: teardown_kubernetes_environment + - func: prune_docker_resources - func: run_retry_script - &base_om7_dependency diff --git a/Makefile b/Makefile index 9c45c1c3b..9bf87a81e 100644 --- a/Makefile +++ b/Makefile @@ -147,7 +147,7 @@ ac: # in parallel and both call 'aws_login') then Docker login may return an error "Error saving credentials:..The # specified item already exists in the keychain". Seems this allows to ignore the error aws_login: - @ scripts/dev/configure_docker_auth.sh + @ scripts/dev/configure_container_auth.sh # cleans up aws resources, including s3 buckets which are older than 5 hours aws_cleanup: diff --git a/docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh b/docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh index 01060832b..aba8ca152 100755 --- a/docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh +++ b/docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh @@ -91,15 +91,28 @@ download_agent() { AGENT_VERSION="${MDB_AGENT_VERSION}" fi - if [ "$(arch)" = "x86_64" ]; then - AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.linux_x86_64.tar.gz" - elif [ "$(arch)" = "arm64" ]; then - AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.amzn2_aarch64.tar.gz" - elif [ "$(arch)" = "ppc64le" ]; then - AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel8_ppc64le.tar.gz" - elif [ "$(arch)" = "s390x" ]; then - AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel7_s390x.tar.gz" - fi + # Detect architecture for agent download + local detected_arch + detected_arch=$(uname -m) + + case "${detected_arch}" in + x86_64) + AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.linux_x86_64.tar.gz" + ;; + aarch64|arm64) + AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.amzn2_aarch64.tar.gz" + ;; + ppc64le) + AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel8_ppc64le.tar.gz" + ;; + s390x) + AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel7_s390x.tar.gz" + ;; + *) + script_log "Error: Unsupported architecture for MongoDB agent: ${detected_arch}" + exit 1 + ;; + esac script_log "Downloading Agent version: ${AGENT_VERSION}" script_log "Downloading a Mongodb Agent from ${base_url:?}" diff --git a/scripts/dev/configure_docker_auth.sh b/scripts/dev/configure_container_auth.sh similarity index 77% rename from scripts/dev/configure_docker_auth.sh rename to scripts/dev/configure_container_auth.sh index 7647fa8a4..9464ba409 100755 --- a/scripts/dev/configure_docker_auth.sh +++ b/scripts/dev/configure_container_auth.sh @@ -8,11 +8,9 @@ source scripts/funcs/checks source scripts/funcs/printing source scripts/funcs/kubernetes -# Parse command line arguments CONTAINER_RUNTIME="${CONTAINER_RUNTIME-"docker"}" -# Validate and set up container runtime configuration -setup_container_runtime() { +setup_validate_container_runtime() { case "${CONTAINER_RUNTIME}" in "podman") if ! 
command -v podman &> /dev/null; then @@ -38,7 +36,6 @@ setup_container_runtime() { ;; esac - # Create config directory if [[ "$USE_SUDO" == "true" ]]; then sudo mkdir -p "$(dirname "${CONFIG_PATH}")" else @@ -76,51 +73,20 @@ write_file() { fi } -check_docker_daemon_is_running() { - if [[ "${CONTAINER_RUNTIME}" == "podman" ]]; then - # Podman doesn't require a daemon - echo "Using Podman (no daemon required)" - return 0 - fi - - if [[ "$(uname -s)" != "Linux" ]]; then - echo "Skipping docker daemon check when not running in Linux" - return 0 - fi - - if systemctl is-active --quiet docker; then - echo "Docker is already running." - else - echo "Docker is not running. Starting Docker..." - # Start the Docker daemon - sudo systemctl start docker - for _ in {1..15}; do - if systemctl is-active --quiet docker; then - echo "Docker started successfully." - return 0 - fi - echo "Waiting for Docker to start..." - sleep 3 - done - fi -} - remove_element() { local config_option="$1" - local tmpfile=$(mktemp) + local tmpfile + tmpfile=$(mktemp) - # Initialize config file if it doesn't exist if [[ ! -f "${CONFIG_PATH}" ]]; then write_file '{}' "${CONFIG_PATH}" fi - # Remove the specified element using jq exec_cmd jq 'del(.'"${config_option}"')' "${CONFIG_PATH}" > "${tmpfile}" exec_cmd cp "${tmpfile}" "${CONFIG_PATH}" rm "${tmpfile}" } -# Container runtime login wrapper container_login() { local username="$1" local registry="$2" @@ -132,21 +98,14 @@ container_login() { fi } -# This is the script which performs container authentication to different registries that we use (so far ECR and RedHat) -# As the result of this login the config file will have all the 'auth' information necessary to work with container registries - -setup_container_runtime - -check_docker_daemon_is_running +setup_validate_container_runtime -# Initialize config file if it doesn't exist if [[ ! -f "${CONFIG_PATH}" ]]; then write_file '{}' "${CONFIG_PATH}" fi if [[ -f "${CONFIG_PATH}" ]]; then if [[ "${RUNNING_IN_EVG:-"false"}" != "true" ]]; then - # Check if login is actually required by making a HEAD request to ECR using existing credentials echo "Checking if container registry credentials are valid..." 
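The credential pre-check that follows reads the cached auth entry out of the runtime's config file with jq; roughly the same check in Python, assuming the default Docker config path (podman stores its auth file elsewhere):

    import json
    from pathlib import Path

    config_path = Path.home() / ".docker" / "config.json"
    registry = "268558157000.dkr.ecr.us-east-1.amazonaws.com"

    auths = {}
    if config_path.exists():
        auths = json.loads(config_path.read_text()).get("auths", {})

    if auths.get(registry, {}).get("auth"):
        print("cached credentials found; validity still needs a registry round-trip")
    else:
        print("no cached credentials; a fresh login is required")
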
ecr_auth=$(exec_cmd jq -r '.auths."268558157000.dkr.ecr.us-east-1.amazonaws.com".auth // empty' "${CONFIG_PATH}") diff --git a/scripts/dev/prepare_local_e2e_run.sh b/scripts/dev/prepare_local_e2e_run.sh index 2139b894b..e6b1b9bcd 100755 --- a/scripts/dev/prepare_local_e2e_run.sh +++ b/scripts/dev/prepare_local_e2e_run.sh @@ -49,7 +49,7 @@ ensure_namespace "${NAMESPACE}" 2>&1 | prepend "ensure_namespace" echo "Deleting ~/.docker/.config.json and re-creating it" rm ~/.docker/config.json || true -scripts/dev/configure_docker_auth.sh 2>&1 | prepend "configure_docker_auth" +scripts/dev/configure_container_auth.sh 2>&1 | prepend "configure_docker_auth" echo "Configuring operator" scripts/evergreen/e2e/configure_operator.sh 2>&1 | prepend "configure_operator" diff --git a/scripts/evergreen/setup_aws.sh b/scripts/evergreen/setup_aws.sh index 5563a8a30..4900de4fd 100755 --- a/scripts/evergreen/setup_aws.sh +++ b/scripts/evergreen/setup_aws.sh @@ -2,34 +2,16 @@ set -Eeou pipefail source scripts/dev/set_env_context.sh +source scripts/funcs/install -# Detect system architecture -detect_architecture() { - local arch - arch=$(uname -m) - echo "Detected architecture: ${arch}" >&2 - echo "${arch}" -} - -# Install AWS CLI v2 via binary download (for x86_64 and aarch64) +# Install AWS CLI v2 via binary download (for amd64 and arm64) install_aws_cli_binary() { local arch="$1" echo "Installing AWS CLI v2 via binary download for ${arch}..." # Map architecture names for AWS CLI download URLs local aws_arch - case "${arch}" in - x86_64) - aws_arch="x86_64" - ;; - aarch64|arm64) - aws_arch="aarch64" - ;; - *) - echo "Error: Unsupported architecture for binary installation: ${arch}" >&2 - return 1 - ;; - esac + aws_arch=$(uname -m) # Download and install AWS CLI v2 local temp_dir @@ -38,7 +20,7 @@ install_aws_cli_binary() { echo "Downloading AWS CLI v2 for ${aws_arch}..." curl -s "https://awscli.amazonaws.com/awscli-exe-linux-${aws_arch}.zip" -o "awscliv2.zip" - + unzip -q awscliv2.zip sudo ./aws/install --update @@ -97,11 +79,7 @@ install_aws_cli() { arch=$(detect_architecture) case "${arch}" in - ppc64le|s390x) - echo "IBM architecture detected (${arch}). Using pip installation..." - install_aws_cli_pip - ;; - x86_64|aarch64|arm64) + amd64|arm64) echo "Standard architecture detected (${arch}). Using binary installation..." 
install_aws_cli_binary "${arch}" ;; diff --git a/scripts/evergreen/setup_jq.sh b/scripts/evergreen/setup_jq.sh index ed09a4dfe..5aaa2f3f6 100755 --- a/scripts/evergreen/setup_jq.sh +++ b/scripts/evergreen/setup_jq.sh @@ -9,32 +9,7 @@ set -Eeou pipefail source scripts/funcs/install -# Detect and map architecture for jq releases -detect_jq_architecture() { - local arch - arch=$(uname -m) - - case "${arch}" in - x86_64) - echo "amd64" - ;; - aarch64|arm64) - echo "arm64" - ;; - ppc64le) - echo "ppc64el" # jq uses ppc64el instead of ppc64le - ;; - s390x) - echo "s390x" - ;; - *) - echo "Error: Unsupported architecture for jq: ${arch}" >&2 - exit 1 - ;; - esac -} - -jq_arch=$(detect_jq_architecture) -echo "Detected architecture: $(uname -m), using jq architecture: ${jq_arch}" +jq_arch=$(detect_architecture "jq") +echo "Detected architecture: ${jq_arch}" download_and_install_binary "${PROJECT_DIR:-${workdir}}/bin" jq "https://github.com/stedolan/jq/releases/download/jq-1.8.1/jq-linux-${jq_arch}" diff --git a/scripts/evergreen/setup_kind.sh b/scripts/evergreen/setup_kind.sh index b8a907404..3df0aa620 100755 --- a/scripts/evergreen/setup_kind.sh +++ b/scripts/evergreen/setup_kind.sh @@ -2,16 +2,12 @@ set -Eeou pipefail source scripts/dev/set_env_context.sh +source scripts/funcs/install # Store the lowercase name of Operating System os=$(uname | tr '[:upper:]' '[:lower:]') # Detect architecture -arch=$(uname -m) -case ${arch} in - x86_64) arch_suffix="amd64" ;; - aarch64|arm64) arch_suffix="arm64" ;; - *) echo "Unsupported architecture: ${arch}" >&2; exit 1 ;; -esac +arch_suffix=$(detect_architecture) # This should be changed when needed latest_version="v0.27.0" diff --git a/scripts/evergreen/setup_kubectl.sh b/scripts/evergreen/setup_kubectl.sh index 00cf975fd..46e86279f 100755 --- a/scripts/evergreen/setup_kubectl.sh +++ b/scripts/evergreen/setup_kubectl.sh @@ -2,32 +2,7 @@ set -Eeou pipefail source scripts/dev/set_env_context.sh - -# Detect system architecture and map to kubectl/helm architecture names -detect_architecture() { - local arch - arch=$(uname -m) - - case "${arch}" in - x86_64) - echo "amd64" - ;; - aarch64|arm64) - echo "arm64" - ;; - ppc64le) - echo "ppc64le" - ;; - s390x) - echo "s390x" - ;; - *) - echo "Unsupported architecture: ${arch}" >&2 - echo "Supported architectures: x86_64 (amd64), aarch64 (arm64), ppc64le, s390x" >&2 - exit 1 - ;; - esac -} +source scripts/funcs/install # Detect the current architecture ARCH=$(detect_architecture) diff --git a/scripts/evergreen/setup_minikube_host.sh b/scripts/evergreen/setup_minikube_host.sh index 5424ecfb4..1c7d6d3a8 100755 --- a/scripts/evergreen/setup_minikube_host.sh +++ b/scripts/evergreen/setup_minikube_host.sh @@ -5,11 +5,12 @@ # Can be run on static hosts for testing and verification source scripts/dev/set_env_context.sh +source scripts/funcs/install set -Eeoux pipefail echo "==========================================" echo "Setting up minikube host with multi-architecture support" -echo "Architecture: $(uname -m)" +echo "Architecture: $(detect_architecture)" echo "OS: $(uname -s)" echo "==========================================" @@ -51,7 +52,7 @@ run_setup_step "jq Setup" "scripts/evergreen/setup_jq.sh" run_setup_step "Minikube Host Setup with Container Runtime Detection" "scripts/minikube/setup_minikube_host.sh" export CONTAINER_RUNTIME=podman -run_setup_step "Container Registry Authentication" "scripts/dev/configure_docker_auth.sh" +run_setup_step "Container Registry Authentication" "scripts/dev/configure_container_auth.sh" 
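run_setup_step itself is defined elsewhere in the script; an illustrative Python analogue of the pattern, with names mirroring the calls above (run from the repo root, and only if the referenced script exists):

    import os
    import subprocess

    def run_setup_step(name: str, script: str, **env: str) -> None:
        print(f"==== {name} ====")
        subprocess.run([script], check=True, env={**os.environ, **env})

    run_setup_step(
        "Container Registry Authentication",
        "scripts/dev/configure_container_auth.sh",
        CONTAINER_RUNTIME="podman",
    )
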
# The minikube cluster is already started by the setup_minikube_host.sh script echo "" diff --git a/scripts/funcs/install b/scripts/funcs/install index fee7fc657..978ed5275 100644 --- a/scripts/funcs/install +++ b/scripts/funcs/install @@ -2,6 +2,41 @@ set -euo pipefail +# Supported target formats: +# - "standard" (default): x86_64→amd64, aarch64|arm64→arm64, ppc64le→ppc64le, s390x→s390x +# - "jq": same as standard but ppc64le→ppc64el (jq's naming convention) +# +detect_architecture() { + local target_format="${1:-standard}" + local arch + arch=$(uname -m) + # Use standard mapping for most tools, with special case for jq's ppc64le naming + local ppc64_suffix="ppc64le" + if [[ "${target_format}" == "jq" ]]; then + ppc64_suffix="ppc64el" # jq uses ppc64el instead of ppc64le + fi + + case "${arch}" in + x86_64) + echo "amd64" + ;; + aarch64|arm64) + echo "arm64" + ;; + ppc64le) + echo "${ppc64_suffix}" + ;; + s390x) + echo "s390x" + ;; + *) + echo "Error: Unsupported architecture: ${arch}" >&2 + echo "Supported architectures: x86_64 (amd64), aarch64 (arm64), ppc64le, s390x" >&2 + return 1 + ;; + esac +} + # Downloads a binary from and moves it into directory. # Example usage: download_and_install_binary ${workdir}/bin jq "https://..." download_and_install_binary() { diff --git a/scripts/minikube/install-minikube.sh b/scripts/minikube/install-minikube.sh index 527b27543..cacfd7618 100755 --- a/scripts/minikube/install-minikube.sh +++ b/scripts/minikube/install-minikube.sh @@ -5,26 +5,7 @@ source scripts/dev/set_env_context.sh source scripts/funcs/install # Detect architecture -ARCH=$(uname -m) -case "${ARCH}" in - x86_64) - MINIKUBE_ARCH="amd64" - ;; - aarch64) - MINIKUBE_ARCH="arm64" - ;; - ppc64le) - MINIKUBE_ARCH="ppc64le" - ;; - s390x) - MINIKUBE_ARCH="s390x" - ;; - *) - echo "Error: Unsupported architecture: ${ARCH}" - echo "Supported architectures: x86_64, aarch64, ppc64le, s390x" - exit 1 - ;; -esac +ARCH=$(detect_architecture) echo "Installing minikube on ${ARCH} architecture..." @@ -34,7 +15,7 @@ CRICTL_VERSION=$(curl -s https://api.github.com/repos/kubernetes-sigs/cri-tools/ # Download and extract crictl tar.gz mkdir -p "${PROJECT_DIR:-.}/bin" -CRICTL_URL="https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-${MINIKUBE_ARCH}.tar.gz" +CRICTL_URL="https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-${ARCH}.tar.gz" echo "Downloading ${CRICTL_URL}" TEMP_DIR=$(mktemp -d) curl --retry 3 --silent -L "${CRICTL_URL}" -o "${TEMP_DIR}/crictl.tar.gz" @@ -53,7 +34,7 @@ if [[ -f "${PROJECT_DIR:-.}/bin/crictl" ]]; then sudo chmod +x /usr/local/bin/crictl sudo chmod +x /usr/bin/crictl echo "✅ crictl installed to /usr/local/bin/ and /usr/bin/" - + # Verify installation if command -v crictl >/dev/null 2>&1; then echo "✅ crictl is now available in PATH: $(which crictl)" @@ -70,6 +51,6 @@ echo "Installing minikube for ${ARCH}..." 
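For reference, the consolidated detect_architecture mapping these scripts now share, rendered as a Python sketch (illustrative only, including jq's ppc64el naming quirk):

    import platform

    def detect_architecture(target_format: str = "standard") -> str:
        arch = platform.machine()
        ppc64 = "ppc64el" if target_format == "jq" else "ppc64le"
        mapping = {"x86_64": "amd64", "aarch64": "arm64", "arm64": "arm64",
                   "ppc64le": ppc64, "s390x": "s390x"}
        if arch not in mapping:
            raise RuntimeError(f"Unsupported architecture: {arch}")
        return mapping[arch]

    print(detect_architecture())      # e.g. amd64 on an x86_64 host
    print(detect_architecture("jq"))  # ppc64le hosts map to ppc64el
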
MINIKUBE_VERSION=$(curl -s https://api.github.com/repos/kubernetes/minikube/releases/latest | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/') # Download minikube for detected architecture -download_and_install_binary "${PROJECT_DIR:-.}/bin" minikube "https://github.com/kubernetes/minikube/releases/download/${MINIKUBE_VERSION}/minikube-linux-${MINIKUBE_ARCH}" +download_and_install_binary "${PROJECT_DIR:-.}/bin" minikube "https://github.com/kubernetes/minikube/releases/download/${MINIKUBE_VERSION}/minikube-linux-${ARCH}" echo "Crictl ${CRICTL_VERSION} and Minikube ${MINIKUBE_VERSION} installed successfully for ${ARCH}" diff --git a/scripts/minikube/minikube_host.sh b/scripts/minikube/minikube_host.sh index 09c5b80f7..971587ec3 100755 --- a/scripts/minikube/minikube_host.sh +++ b/scripts/minikube/minikube_host.sh @@ -12,15 +12,16 @@ test "${MDB_BASH_DEBUG:-0}" -eq 1 && set -x source scripts/dev/set_env_context.sh source scripts/funcs/printing +source scripts/funcs/install -if [[ -z "${S390_HOST_NAME}" ]]; then - echo "S390_HOST_NAME env var is missing" +if [[ -z "${MINIKUBE_HOST_NAME}" ]]; then + echo "MINIKUBE_HOST_NAME env var is missing" echo "Set it to your s390x host connection string (e.g., user@hostname)" exit 1 fi get_host_url() { - echo "${S390_HOST_NAME}" + echo "${MINIKUBE_HOST_NAME}" } cmd=${1-""} @@ -29,7 +30,7 @@ if [[ "${cmd}" != "" && "${cmd}" != "help" ]]; then host_url=$(get_host_url) fi -kubeconfig_path="${HOME}/.operator-dev/s390-host.kubeconfig" +kubeconfig_path="${HOME}/.operator-dev/minikube-host.kubeconfig" configure() { ssh -T -q "${host_url}" "sudo chown \$(whoami):\$(whoami) ~/.docker || true; mkdir -p ~/.docker" @@ -102,10 +103,10 @@ get-kubeconfig() { } recreate-minikube-cluster() { - configure "$(uname -m)" 2>&1| prepend "minikube_host.sh configure" - echo "Recreating minikube cluster on ${S390_HOST_NAME} (${host_url})..." + configure "$(detect_architecture)" 2>&1| prepend "minikube_host.sh configure" + echo "Recreating minikube cluster on ${MINIKUBE_HOST_NAME} (${host_url})..." 
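The CRICTL_VERSION and MINIKUBE_VERSION lookups above resolve the latest tag through the public GitHub API with curl, grep, and sed; a rough Python equivalent, assuming requests is available:

    import requests

    url = "https://api.github.com/repos/kubernetes/minikube/releases/latest"
    resp = requests.get(url, timeout=30)
    resp.raise_for_status()
    print(resp.json()["tag_name"])  # e.g. v1.36.0
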
# shellcheck disable=SC2088 - ssh -T "${host_url}" "cd ~/mongodb-kubernetes; export KUBE_ENVIRONMENT_NAME=minikube; minikube delete || true; minikube start --driver=podman --memory=8192mb --cpus=4" + ssh -T "${host_url}" "cd ~/mongodb-kubernetes; export KUBE_ENVIRONMENT_NAME=minikube; minikube delete || true; ./scripts/minikube/setup_minikube_host.sh" echo "Copying kubeconfig to ${kubeconfig_path}" get-kubeconfig } @@ -175,7 +176,7 @@ usage() { PREREQUISITES: - s390x host with SSH access - - define S390_HOST_NAME env var (e.g., export S390_HOST_NAME=user@hostname) + - define MINIKUBE_HOST_NAME env var (e.g., export MINIKUBE_HOST_NAME=user@hostname) - SSH key-based authentication configured COMMANDS: @@ -183,7 +184,7 @@ COMMANDS: sync rsync of project directory recreate-minikube-cluster recreates minikube cluster and executes get-kubeconfig remote-prepare-local-e2e-run executes prepare-local-e2e on the remote host - get-kubeconfig copies remote minikube kubeconfig locally to ~/.operator-dev/s390-host.kubeconfig + get-kubeconfig copies remote minikube kubeconfig locally to ~/.operator-dev/minikube-host.kubeconfig tunnel [args] creates ssh session with tunneling to all API servers ssh [args] creates ssh session passing optional arguments to ssh cmd [command with args] execute command as if being on s390x host @@ -191,7 +192,7 @@ COMMANDS: help this message EXAMPLES: - export S390_HOST_NAME=user@ibmz8 + export MINIKUBE_HOST_NAME=user@ibmz8 minikube_host.sh tunnel minikube_host.sh cmd 'make e2e test=replica_set' " diff --git a/scripts/minikube/setup_minikube_host.sh b/scripts/minikube/setup_minikube_host.sh index 16f1c1c31..1d2618c32 100755 --- a/scripts/minikube/setup_minikube_host.sh +++ b/scripts/minikube/setup_minikube_host.sh @@ -2,6 +2,7 @@ # this script downloads necessary tooling for alternative architectures (s390x, ppc64le) using minikube (similar to setup_evg_host.sh) source scripts/dev/set_env_context.sh +source scripts/funcs/install set -Eeou pipefail @@ -28,7 +29,7 @@ EOF } # retrieve arch variable off the shell command line -ARCH=${1-"$(uname -m)"} +ARCH=${1-"$(detect_architecture)"} echo "Setting up minikube host for architecture: ${ARCH}" From 88c76bc880e8a7540a4e12ac0e1db847b2f07b71 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Thu, 7 Aug 2025 15:34:35 +0200 Subject: [PATCH 083/164] Rename trace --- scripts/release/atomic_pipeline.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index fe29289dd..5d354307f 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -47,7 +47,7 @@ def load_release_file() -> Dict: return json.load(release) -@TRACER.start_as_current_span("sonar_build_image") +@TRACER.start_as_current_span("pipeline_process_image") def pipeline_process_image( image_name: str, dockerfile_path: str, From 0fd4db8fb6c1091bb7f903338a53e91da9a2aff3 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Thu, 7 Aug 2025 15:36:55 +0200 Subject: [PATCH 084/164] Remove operator build --- Makefile | 2 +- scripts/release/atomic_pipeline.py | 6 -- scripts/release/optimized_operator_build.py | 87 --------------------- scripts/release/pipeline_main.py | 2 - 4 files changed, 1 insertion(+), 96 deletions(-) delete mode 100644 scripts/release/optimized_operator_build.py diff --git a/Makefile b/Makefile index f473918c5..069dd64ca 100644 --- a/Makefile +++ b/Makefile @@ -154,7 +154,7 @@ aws_cleanup: @ scripts/evergreen/prepare_aws.sh 
build-and-push-operator-image: aws_login - @ scripts/evergreen/run_python.sh scripts/release/pipeline_main.py operator-quick + @ scripts/evergreen/run_python.sh scripts/release/pipeline_main.py operator build-and-push-database-image: aws_login @ scripts/dev/build_push_database_image diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 5d354307f..af17a8be7 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -25,7 +25,6 @@ from .build_configuration import BuildConfiguration from .build_context import BuildScenario from .build_images import process_image -from .optimized_operator_build import build_operator_image_fast TRACER = trace.get_tracer("evergreen-agent") @@ -159,11 +158,6 @@ def build_operator_image(build_configuration: BuildConfiguration): ) -def build_operator_image_patch(build_configuration: BuildConfiguration): - if not build_operator_image_fast(build_configuration): - build_operator_image(build_configuration) - - def build_database_image(build_configuration: BuildConfiguration): """ Builds a new database image. diff --git a/scripts/release/optimized_operator_build.py b/scripts/release/optimized_operator_build.py deleted file mode 100644 index c59e3c003..000000000 --- a/scripts/release/optimized_operator_build.py +++ /dev/null @@ -1,87 +0,0 @@ -import os -import subprocess -import tarfile -from datetime import datetime, timedelta, timezone - -import docker -from lib.base_logger import logger -from scripts.release.build_configuration import BuildConfiguration - - -def copy_into_container(client, src, dst): - """Copies a local file into a running container.""" - - os.chdir(os.path.dirname(src)) - srcname = os.path.basename(src) - with tarfile.open(src + ".tar", mode="w") as tar: - tar.add(srcname) - - name, dst = dst.split(":") - container = client.containers.get(name) - - with open(src + ".tar", "rb") as fd: - container.put_archive(os.path.dirname(dst), fd.read()) - - -def build_operator_image_fast(build_configuration: BuildConfiguration) -> bool: - """This function builds the operator locally and pushed into an existing - Docker image. This is the fastest way I could image we can do this.""" - - client = docker.from_env() - # image that we know is where we build operator. - image_repo = build_configuration.base_registry + "/" + build_configuration.image_type + "/mongodb-kubernetes" - image_tag = "latest" - repo_tag = image_repo + ":" + image_tag - - logger.debug(f"Pulling image: {repo_tag}") - try: - image = client.images.get(repo_tag) - except docker.errors.ImageNotFound: - logger.debug("Operator image does not exist locally. Building it now") - return False - - logger.debug("Done") - too_old = datetime.now() - timedelta(hours=3) - image_timestamp = datetime.fromtimestamp( - image.history()[0]["Created"] - ) # Layer 0 is the latest added layer to this Docker image. [-1] is the FROM layer. 
- - if image_timestamp < too_old: - logger.info("Current operator image is too old, will rebuild it completely first") - return False - - container_name = "mongodb-enterprise-operator" - operator_binary_location = "/usr/local/bin/mongodb-kubernetes-operator" - try: - client.containers.get(container_name).remove() - logger.debug(f"Removed {container_name}") - except docker.errors.NotFound: - pass - - container = client.containers.run(repo_tag, name=container_name, entrypoint="sh", detach=True) - - logger.debug("Building operator with debugging symbols") - subprocess.run(["make", "manager"], check=True, stdout=subprocess.PIPE) - logger.debug("Done building the operator") - - copy_into_container( - client, - os.getcwd() + "/docker/mongodb-kubernetes-operator/content/mongodb-kubernetes-operator", - container_name + ":" + operator_binary_location, - ) - - # Commit changes on disk as a tag - container.commit( - repository=image_repo, - tag=image_tag, - ) - # Stop this container so we can use it next time - container.stop() - container.remove() - - logger.info("Pushing operator to {}:{}".format(image_repo, image_tag)) - client.images.push( - repository=image_repo, - tag=image_tag, - ) - return True diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index 5b4cc1195..3f7b9473d 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -25,7 +25,6 @@ build_mco_tests_image, build_om_image, build_operator_image, - build_operator_image_patch, build_readiness_probe_image, build_tests_image, build_upgrade_hook_image, @@ -54,7 +53,6 @@ def get_builder_function_for_image_name() -> Dict[str, Callable]: "mco-test": build_mco_tests_image, "readiness-probe": build_readiness_probe_image, "upgrade-hook": build_upgrade_hook_image, - "operator-quick": build_operator_image_patch, "database": build_database_image, "agent": build_agent_default_case, # From ee86ebf401ed7fc937c88cdae9245092e1812fd0 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Thu, 7 Aug 2025 15:55:22 +0200 Subject: [PATCH 085/164] Doc and logs --- scripts/release/atomic_pipeline.py | 17 ++++++----------- scripts/release/build_context.py | 3 ++- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index af17a8be7..a770b9962 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -9,7 +9,7 @@ from concurrent.futures import ProcessPoolExecutor from copy import copy from queue import Queue -from typing import Dict, List, Optional, Tuple, Union +from typing import Dict, List, Optional, Tuple import requests import semver @@ -202,7 +202,7 @@ def find_om_in_releases(om_version: str, releases: Dict[str, str]) -> Optional[s def get_om_releases() -> Dict[str, str]: - """Returns a dictionary representation of the Json document holdin all the OM + """Returns a dictionary representation of the Json document holding all the OM releases. """ ops_manager_release_archive = ( @@ -267,8 +267,7 @@ def build_image_generic( extra_args: dict | None = None, ): """ - Build one or more platform-specific images, then (optionally) - push a manifest and sign the result. + Build an image then (optionally) sign the result. 
""" registry = build_configuration.base_registry @@ -378,9 +377,6 @@ def build_agent_pipeline( ): build_configuration_copy = copy(build_configuration) build_configuration_copy.version = image_version - print( - f"======== Building agent pipeline for version {image_version}, build configuration version: {build_configuration.version}" - ) args = { "version": image_version, "agent_version": agent_version, @@ -404,7 +400,6 @@ def build_agent_default_case(build_configuration: BuildConfiguration): """ Build the agent only for the latest operator for patches and operator releases. - See more information in the function: build_agent_on_agent_bump """ release = load_release_file() @@ -426,12 +421,12 @@ def build_agent_default_case(build_configuration: BuildConfiguration): if build_configuration.parallel_factor > 0: max_workers = build_configuration.parallel_factor with ProcessPoolExecutor(max_workers=max_workers) as executor: - logger.info(f"running with factor of {max_workers}") - print(f"======= Versions to build {agent_versions_to_build} =======") + logger.info(f"Running with factor of {max_workers}") + logger.info(f"======= Agent versions to build {agent_versions_to_build} =======") for idx, agent_version in enumerate(agent_versions_to_build): # We don't need to keep create and push the same image on every build. # It is enough to create and push the non-operator suffixed images only during releases to ecr and quay. - print(f"======= Building Agent {agent_version} ({idx}/{len(agent_versions_to_build)})") + logger.info(f"======= Building Agent {agent_version} ({idx}/{len(agent_versions_to_build)})") _build_agent_operator( agent_version, build_configuration, diff --git a/scripts/release/build_context.py b/scripts/release/build_context.py index 9a0e1ccd4..143693f46 100644 --- a/scripts/release/build_context.py +++ b/scripts/release/build_context.py @@ -11,7 +11,7 @@ class BuildScenario(str, Enum): RELEASE = "release" # Official release triggered by a git tag PATCH = "patch" # CI build for a patch/pull request - STAGING = "staging" # CI build from a merge to the master + STAGING = "staging" # CI build from a merge to the master branch DEVELOPMENT = "development" # Local build on a developer machine @classmethod @@ -71,6 +71,7 @@ def get_version(self) -> str: return self.git_tag if self.patch_id: return self.patch_id + # Alternatively, we can fail here if no ID is explicitly defined return "latest" def get_base_registry(self) -> str: From 5f5940f5ceef07dd0138f79cce95b1e343ab8e7e Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Thu, 7 Aug 2025 17:27:09 +0200 Subject: [PATCH 086/164] Use build_image_generic for test images too --- scripts/release/atomic_pipeline.py | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index a770b9962..1fc2e9e4d 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -104,13 +104,13 @@ def build_tests_image(build_configuration: BuildConfiguration): if python_version == "": raise Exception("Missing PYTHON_VERSION environment variable") - buildargs = dict({"PYTHON_VERSION": python_version}) + buildargs = {"PYTHON_VERSION": python_version} - pipeline_process_image( - image_name, + build_image_generic( + image_name=image_name, dockerfile_path="docker/mongodb-kubernetes-tests/Dockerfile", build_configuration=build_configuration, - dockerfile_args=buildargs, + extra_args=buildargs, build_path="docker/mongodb-kubernetes-tests", ) @@ 
-124,13 +124,13 @@ def build_mco_tests_image(build_configuration: BuildConfiguration): if golang_version == "": raise Exception("Missing GOLANG_VERSION environment variable") - buildargs = dict({"GOLANG_VERSION": golang_version}) + buildargs = {"GOLANG_VERSION": golang_version} - pipeline_process_image( - image_name, + build_image_generic( + image_name=image_name, dockerfile_path="docker/mongodb-community-tests/Dockerfile", build_configuration=build_configuration, - dockerfile_args=buildargs, + extra_args=buildargs, ) @@ -265,6 +265,7 @@ def build_image_generic( dockerfile_path: str, build_configuration: BuildConfiguration, extra_args: dict | None = None, + build_path: str = ".", ): """ Build an image then (optionally) sign the result. @@ -285,6 +286,7 @@ def build_image_generic( dockerfile_path=dockerfile_path, build_configuration=build_configuration, dockerfile_args=build_args, + build_path=build_path, ) if build_configuration.sign: From 6dd208f9ebc35788ae6990197ea6b4c3d081ec26 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Thu, 7 Aug 2025 17:27:21 +0200 Subject: [PATCH 087/164] Remove unused sign images in repositories --- scripts/release/atomic_pipeline.py | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 1fc2e9e4d..3c00f7349 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -171,20 +171,6 @@ def build_database_image(build_configuration: BuildConfiguration): ) -@TRACER.start_as_current_span("sign_image_in_repositories") -def sign_image_in_repositories(args: Dict[str, str], arch: str = None): - span = trace.get_current_span() - repository = args["quay_registry"] + args["ubi_suffix"] - tag = args["release_version"] - if arch: - tag = f"{tag}-{arch}" - - span.set_attribute("mck.tag", tag) - - sign_image(repository, tag) - verify_signature(repository, tag) - - def find_om_in_releases(om_version: str, releases: Dict[str, str]) -> Optional[str]: """ There are a few alternatives out there that allow for json-path or xpath-type From 493d4d67075ebb494be82dc0e09adffa41c2556a Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Thu, 7 Aug 2025 17:36:26 +0200 Subject: [PATCH 088/164] Remove pipeline_process_image --- scripts/release/atomic_pipeline.py | 52 ++++++++++-------------------- 1 file changed, 17 insertions(+), 35 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 3c00f7349..6d6654b83 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -46,36 +46,6 @@ def load_release_file() -> Dict: return json.load(release) -@TRACER.start_as_current_span("pipeline_process_image") -def pipeline_process_image( - image_name: str, - dockerfile_path: str, - build_configuration: BuildConfiguration, - dockerfile_args: Dict[str, str] = None, - build_path: str = ".", -): - """Builds a Docker image with arguments defined in `args`.""" - span = trace.get_current_span() - span.set_attribute("mck.image_name", image_name) - if dockerfile_args: - span.set_attribute("mck.build_args", str(dockerfile_args)) - - logger.info(f"Dockerfile args: {dockerfile_args}, for image: {image_name}") - - if not dockerfile_args: - dockerfile_args = {} - logger.debug(f"Build args: {dockerfile_args}") - process_image( - image_name, - image_tag=build_configuration.version, - dockerfile_path=dockerfile_path, - dockerfile_args=dockerfile_args, - base_registry=build_configuration.base_registry, - 
platforms=build_configuration.platforms, - sign=build_configuration.sign, - build_path=build_path, - ) - def build_tests_image(build_configuration: BuildConfiguration): """ @@ -246,6 +216,7 @@ def build_om_image(build_configuration: BuildConfiguration): ) +@TRACER.start_as_current_span("build_image_generic") def build_image_generic( image_name: str, dockerfile_path: str, @@ -256,22 +227,33 @@ def build_image_generic( """ Build an image then (optionally) sign the result. """ + # Tracing setup + span = trace.get_current_span() + span.set_attribute("mck.image_name", image_name) registry = build_configuration.base_registry args_list = extra_args or {} version = args_list.get("version", "") - # merge in the registry without mutating caller’s dict + # merge in the registry without mutating caller's dict build_args = {**args_list, "quay_registry": registry} + + if build_args: + span.set_attribute("mck.build_args", str(build_args)) + + logger.info(f"Building {image_name}, dockerfile args: {build_args}") logger.debug(f"Build args: {build_args}") - logger.debug(f"Building {image_name} for platforms={build_configuration.platforms}") logger.debug(f"build image generic - registry={registry}") - pipeline_process_image( - image_name=image_name, + + process_image( + image_name, + image_tag=build_configuration.version, dockerfile_path=dockerfile_path, - build_configuration=build_configuration, dockerfile_args=build_args, + base_registry=build_configuration.base_registry, + platforms=build_configuration.platforms, + sign=build_configuration.sign, build_path=build_path, ) From a21b254ba8cd9b7dcb9e34de63dd14a3ea11129d Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Thu, 7 Aug 2025 17:54:05 +0200 Subject: [PATCH 089/164] Remove process_image --- scripts/release/atomic_pipeline.py | 26 ++++++++++++---------- scripts/release/build_images.py | 35 +++--------------------------- 2 files changed, 17 insertions(+), 44 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 6d6654b83..ad8beef44 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -24,7 +24,7 @@ from .build_configuration import BuildConfiguration from .build_context import BuildScenario -from .build_images import process_image +from .build_images import build_image TRACER = trace.get_tracer("evergreen-agent") @@ -233,7 +233,6 @@ def build_image_generic( registry = build_configuration.base_registry args_list = extra_args or {} - version = args_list.get("version", "") # merge in the registry without mutating caller's dict build_args = {**args_list, "quay_registry": registry} @@ -246,20 +245,23 @@ def build_image_generic( logger.debug(f"Building {image_name} for platforms={build_configuration.platforms}") logger.debug(f"build image generic - registry={registry}") - process_image( - image_name, - image_tag=build_configuration.version, - dockerfile_path=dockerfile_path, - dockerfile_args=build_args, - base_registry=build_configuration.base_registry, + # Build docker registry URI and call build_image + docker_registry = f"{build_configuration.base_registry}/{image_name}" + image_full_uri = f"{docker_registry}:{build_configuration.version}" + + build_image( + tag=image_full_uri, + dockerfile=dockerfile_path, + path=build_path, + args=build_args, + push=True, platforms=build_configuration.platforms, - sign=build_configuration.sign, - build_path=build_path, ) if build_configuration.sign: - sign_image(registry, version) - verify_signature(registry, version) + 
logger.info("Signing image") + sign_image(docker_registry, build_configuration.version) + verify_signature(docker_registry, build_configuration.version) def build_init_appdb(build_configuration: BuildConfiguration): diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py index 4ffcae04c..0cb2c91cc 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build_images.py @@ -9,7 +9,6 @@ import docker from lib.base_logger import logger -from scripts.evergreen.release.images_signing import sign_image, verify_signature def ecr_login_boto3(region: str, account_id: str): @@ -83,6 +82,9 @@ def build_image( :param push: Whether to push the image after building :param platforms: List of target platforms (e.g., ["linux/amd64", "linux/arm64"]) """ + # Login to ECR before building + ecr_login_boto3(region="us-east-1", account_id="268558157000") + docker = python_on_whales.docker try: @@ -126,34 +128,3 @@ def build_image( raise RuntimeError(f"Failed to build image {tag}: {str(e)}") -def process_image( - image_name: str, - image_tag: str, - dockerfile_path: str, - dockerfile_args: Dict[str, str], - base_registry: str, - platforms: list[str] = None, - sign: bool = False, - build_path: str = ".", - push: bool = True, -): - # Login to ECR - ecr_login_boto3(region="us-east-1", account_id="268558157000") - - docker_registry = f"{base_registry}/{image_name}" - image_full_uri = f"{docker_registry}:{image_tag}" - - # Build image with docker buildx - build_image( - tag=image_full_uri, - dockerfile=dockerfile_path, - path=build_path, - args=dockerfile_args, - push=push, - platforms=platforms, - ) - - if sign: - logger.info("Signing image") - sign_image(docker_registry, image_tag) - verify_signature(docker_registry, image_tag) From a7db180dc804937e4327efc2224a43034e1e0193 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Thu, 7 Aug 2025 18:00:38 +0200 Subject: [PATCH 090/164] Rename function --- scripts/release/atomic_pipeline.py | 26 +++++++++++++------------- scripts/release/build_images.py | 2 +- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index ad8beef44..b8645fb3d 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -24,7 +24,7 @@ from .build_configuration import BuildConfiguration from .build_context import BuildScenario -from .build_images import build_image +from .build_images import execute_docker_build TRACER = trace.get_tracer("evergreen-agent") @@ -76,7 +76,7 @@ def build_tests_image(build_configuration: BuildConfiguration): buildargs = {"PYTHON_VERSION": python_version} - build_image_generic( + build_image( image_name=image_name, dockerfile_path="docker/mongodb-kubernetes-tests/Dockerfile", build_configuration=build_configuration, @@ -96,7 +96,7 @@ def build_mco_tests_image(build_configuration: BuildConfiguration): buildargs = {"GOLANG_VERSION": golang_version} - build_image_generic( + build_image( image_name=image_name, dockerfile_path="docker/mongodb-community-tests/Dockerfile", build_configuration=build_configuration, @@ -120,7 +120,7 @@ def build_operator_image(build_configuration: BuildConfiguration): logger.info(f"Building Operator args: {args}") image_name = "mongodb-kubernetes" - build_image_generic( + build_image( image_name=image_name, dockerfile_path="docker/mongodb-kubernetes-operator/Dockerfile", build_configuration=build_configuration, @@ -133,7 +133,7 @@ def build_database_image(build_configuration: BuildConfiguration): 
Builds a new database image. """ args = {"version": build_configuration.version} - build_image_generic( + build_image( image_name="mongodb-kubernetes-database", dockerfile_path="docker/mongodb-kubernetes-database/Dockerfile", build_configuration=build_configuration, @@ -184,7 +184,7 @@ def find_om_url(om_version: str) -> str: def build_init_om_image(build_configuration: BuildConfiguration): args = {"version": build_configuration.version} - build_image_generic( + build_image( image_name="mongodb-kubernetes-init-ops-manager", dockerfile_path="docker/mongodb-kubernetes-init-ops-manager/Dockerfile", build_configuration=build_configuration, @@ -208,7 +208,7 @@ def build_om_image(build_configuration: BuildConfiguration): "om_download_url": om_download_url, } - build_image_generic( + build_image( image_name="mongodb-enterprise-ops-manager-ubi", dockerfile_path="docker/mongodb-enterprise-ops-manager/Dockerfile", build_configuration=build_configuration, @@ -217,7 +217,7 @@ def build_om_image(build_configuration: BuildConfiguration): @TRACER.start_as_current_span("build_image_generic") -def build_image_generic( +def build_image( image_name: str, dockerfile_path: str, build_configuration: BuildConfiguration, @@ -249,7 +249,7 @@ def build_image_generic( docker_registry = f"{build_configuration.base_registry}/{image_name}" image_full_uri = f"{docker_registry}:{build_configuration.version}" - build_image( + execute_docker_build( tag=image_full_uri, dockerfile=dockerfile_path, path=build_path, @@ -269,7 +269,7 @@ def build_init_appdb(build_configuration: BuildConfiguration): base_url = "https://fastdl.mongodb.org/tools/db/" mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) args = {"version": build_configuration.version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} - build_image_generic( + build_image( image_name="mongodb-kubernetes-init-appdb", dockerfile_path="docker/mongodb-kubernetes-init-appdb/Dockerfile", build_configuration=build_configuration, @@ -283,7 +283,7 @@ def build_init_database(build_configuration: BuildConfiguration): base_url = "https://fastdl.mongodb.org/tools/db/" mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) args = {"version": build_configuration.version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} - build_image_generic( + build_image( "mongodb-kubernetes-init-database", "docker/mongodb-kubernetes-init-database/Dockerfile", build_configuration=build_configuration, @@ -317,7 +317,7 @@ def build_community_image(build_configuration: BuildConfiguration, image_type: s "GOLANG_VERSION": golang_version, } - build_image_generic( + build_image( image_name=image_name, dockerfile_path=dockerfile_path, build_configuration=build_configuration, @@ -360,7 +360,7 @@ def build_agent_pipeline( "quay_registry": build_configuration.base_registry, } - build_image_generic( + build_image( image_name="mongodb-agent-ubi", dockerfile_path="docker/mongodb-agent/Dockerfile", build_configuration=build_configuration_copy, diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py index 0cb2c91cc..755791ae4 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build_images.py @@ -69,7 +69,7 @@ def ensure_buildx_builder(builder_name: str = "multiarch") -> str: return builder_name -def build_image( +def execute_docker_build( tag: str, dockerfile: str, path: str, args: Dict[str, str] = {}, push: bool = True, platforms: list[str] = None ): """ From 52b8662bb96c5d2ba9115ddf716efa93bacc993a Mon Sep 17 
00:00:00 2001 From: Julien Benhaim Date: Thu, 7 Aug 2025 18:01:03 +0200 Subject: [PATCH 091/164] Lint --- scripts/release/atomic_pipeline.py | 7 +++---- scripts/release/build_images.py | 4 +--- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index b8645fb3d..f0ca02e00 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -46,7 +46,6 @@ def load_release_file() -> Dict: return json.load(release) - def build_tests_image(build_configuration: BuildConfiguration): """ Builds image used to run tests. @@ -236,15 +235,15 @@ def build_image( # merge in the registry without mutating caller's dict build_args = {**args_list, "quay_registry": registry} - + if build_args: span.set_attribute("mck.build_args", str(build_args)) - + logger.info(f"Building {image_name}, dockerfile args: {build_args}") logger.debug(f"Build args: {build_args}") logger.debug(f"Building {image_name} for platforms={build_configuration.platforms}") logger.debug(f"build image generic - registry={registry}") - + # Build docker registry URI and call build_image docker_registry = f"{build_configuration.base_registry}/{image_name}" image_full_uri = f"{docker_registry}:{build_configuration.version}" diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py index 755791ae4..e2a43683b 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build_images.py @@ -84,7 +84,7 @@ def execute_docker_build( """ # Login to ECR before building ecr_login_boto3(region="us-east-1", account_id="268558157000") - + docker = python_on_whales.docker try: @@ -126,5 +126,3 @@ def execute_docker_build( except Exception as e: logger.error(f"Failed to build image {tag}: {e}") raise RuntimeError(f"Failed to build image {tag}: {str(e)}") - - From e4655827bcbaa26c1508a38309ed35c1095b2cb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Fri, 1 Aug 2025 11:20:16 +0200 Subject: [PATCH 092/164] wip: Initial changes --- .evergreen.yml | 4 +- build_info.json | 40 +- scripts/release/atomic_pipeline.py | 433 +++++++++++++----- scripts/release/build/build_info.py | 18 +- scripts/release/build/build_scenario.py | 29 ++ .../build/image_build_configuration.py | 25 + scripts/release/build_configuration.py | 19 - scripts/release/build_context.py | 37 +- scripts/release/build_images.py | 37 +- scripts/release/optimized_operator_build.py | 87 ++++ scripts/release/pipeline_main.py | 138 +++--- 11 files changed, 614 insertions(+), 253 deletions(-) create mode 100644 scripts/release/build/image_build_configuration.py delete mode 100644 scripts/release/build_configuration.py create mode 100644 scripts/release/optimized_operator_build.py diff --git a/.evergreen.yml b/.evergreen.yml index 17d6cd5fe..fe0944afb 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -423,7 +423,7 @@ tasks: - func: build_multi_cluster_binary - func: pipeline vars: - image_name: test + image_name: meko-tests - name: build_mco_test_image commands: @@ -431,7 +431,7 @@ tasks: - func: setup_building_host - func: pipeline vars: - image_name: mco-test + image_name: mco-tests - name: build_operator_ubi commands: diff --git a/build_info.json b/build_info.json index 10935b7ef..9f7d173b8 100644 --- a/build_info.json +++ b/build_info.json @@ -1,6 +1,6 @@ { "images": { - "mongodbOperator": { + "operator": { "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes", "platforms": [ @@ -22,7 +22,7 @@ ] } }, - 
"initDatabase": { + "init-database": { "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-database", "platforms": [ @@ -44,7 +44,7 @@ ] } }, - "initAppDb": { + "init-appdb": { "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-appdb", "platforms": [ @@ -66,7 +66,7 @@ ] } }, - "initOpsManager": { + "init-ops-manager": { "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-ops-manager", "platforms": [ @@ -110,7 +110,35 @@ ] } }, - "readinessprobe": { + "meko-tests": { + "patch": { + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-tests", + "platforms": [ + "linux/amd64" + ] + }, + "staging": { + "repository": "quay.io/mongodb/mongodb-kubernetes-tests-stg", + "platforms": [ + "linux/amd64" + ] + } + }, + "mco-tests": { + "patch": { + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-community-tests", + "platforms": [ + "linux/amd64" + ] + }, + "staging": { + "repository": "quay.io/mongodb/mongodb-community-tests-stg", + "platforms": [ + "linux/amd64" + ] + } + }, + "readiness-probe": { "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-readinessprobe", "platforms": [ @@ -133,7 +161,7 @@ ] } }, - "operator-version-upgrade-post-start-hook": { + "upgrade-hook": { "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-operator-version-upgrade-post-start-hook", "platforms": [ diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index f0ca02e00..0f2caedb5 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -9,7 +9,7 @@ from concurrent.futures import ProcessPoolExecutor from copy import copy from queue import Queue -from typing import Dict, List, Optional, Tuple +from typing import Dict, List, Optional, Tuple, Union import requests import semver @@ -17,16 +17,30 @@ from packaging.version import Version from lib.base_logger import logger +from scripts.evergreen.release.agent_matrix import ( + get_supported_operator_versions, +) from scripts.evergreen.release.images_signing import ( sign_image, verify_signature, ) +from scripts.release.build.image_build_configuration import ImageBuildConfiguration -from .build_configuration import BuildConfiguration -from .build_context import BuildScenario -from .build_images import execute_docker_build +from .build_images import process_image +from .optimized_operator_build import build_operator_image_fast TRACER = trace.get_tracer("evergreen-agent") +DEFAULT_NAMESPACE = "default" + + +def make_list_of_str(value: Union[None, str, List[str]]) -> List[str]: + if value is None: + return [] + + if isinstance(value, str): + return [e.strip() for e in value.split(",")] + + return value def get_tools_distro(tools_version: str) -> Dict[str, str]: @@ -41,12 +55,47 @@ def is_running_in_evg_pipeline(): return os.getenv("RUNNING_IN_EVG", "") == "true" +def is_running_in_patch(): + is_patch = os.environ.get("is_patch") + return is_patch is not None and is_patch.lower() == "true" + + def load_release_file() -> Dict: with open("release.json") as release: return json.load(release) -def build_tests_image(build_configuration: BuildConfiguration): +@TRACER.start_as_current_span("sonar_build_image") +def pipeline_process_image( + dockerfile_path: str, + build_configuration: ImageBuildConfiguration, + dockerfile_args: Dict[str, str] = None, 
+ build_path: str = ".", +): + """Builds a Docker image with arguments defined in `args`.""" + image_name = build_configuration.image_name() + span = trace.get_current_span() + span.set_attribute("mck.image_name", image_name) + if dockerfile_args: + span.set_attribute("mck.build_args", str(dockerfile_args)) + + logger.info(f"Dockerfile args: {dockerfile_args}, for image: {image_name}") + + if not dockerfile_args: + dockerfile_args = {} + logger.debug(f"Build args: {dockerfile_args}") + process_image( + image_tag=build_configuration.version, + dockerfile_path=dockerfile_path, + dockerfile_args=dockerfile_args, + registry=build_configuration.registry, + platforms=build_configuration.platforms, + sign=build_configuration.sign, + build_path=build_path, + ) + + +def build_tests_image(build_configuration: ImageBuildConfiguration): """ Builds image used to run tests. """ @@ -69,41 +118,38 @@ def build_tests_image(build_configuration: BuildConfiguration): shutil.copyfile("release.json", "docker/mongodb-kubernetes-tests/release.json") shutil.copyfile("requirements.txt", requirements_dest) - python_version = os.getenv("PYTHON_VERSION", "3.11") + python_version = os.getenv("PYTHON_VERSION", "3.13") if python_version == "": raise Exception("Missing PYTHON_VERSION environment variable") - buildargs = {"PYTHON_VERSION": python_version} + build_args = dict({"PYTHON_VERSION": python_version}) - build_image( - image_name=image_name, + pipeline_process_image( dockerfile_path="docker/mongodb-kubernetes-tests/Dockerfile", build_configuration=build_configuration, - extra_args=buildargs, + dockerfile_args=build_args, build_path="docker/mongodb-kubernetes-tests", ) -def build_mco_tests_image(build_configuration: BuildConfiguration): +def build_mco_tests_image(build_configuration: ImageBuildConfiguration): """ Builds image used to run community tests. """ - image_name = "mongodb-community-tests" golang_version = os.getenv("GOLANG_VERSION", "1.24") if golang_version == "": raise Exception("Missing GOLANG_VERSION environment variable") - buildargs = {"GOLANG_VERSION": golang_version} + buildargs = dict({"GOLANG_VERSION": golang_version}) - build_image( - image_name=image_name, + pipeline_process_image( dockerfile_path="docker/mongodb-community-tests/Dockerfile", build_configuration=build_configuration, - extra_args=buildargs, + dockerfile_args=buildargs, ) -def build_operator_image(build_configuration: BuildConfiguration): +def build_operator_image(build_configuration: ImageBuildConfiguration): """Calculates arguments required to build the operator image, and starts the build process.""" # In evergreen, we can pass test_suffix env to publish the operator to a quay # repository with a given suffix. @@ -119,27 +165,53 @@ def build_operator_image(build_configuration: BuildConfiguration): logger.info(f"Building Operator args: {args}") image_name = "mongodb-kubernetes" - build_image( - image_name=image_name, + pipeline_process_image( dockerfile_path="docker/mongodb-kubernetes-operator/Dockerfile", build_configuration=build_configuration, - extra_args=args, + dockerfile_args=args, ) -def build_database_image(build_configuration: BuildConfiguration): +def build_operator_image_patch(build_configuration: ImageBuildConfiguration): + if not build_operator_image_fast(build_configuration): + build_operator_image(build_configuration) + + +def build_database_image(build_configuration: ImageBuildConfiguration): """ Builds a new database image. 
""" args = {"version": build_configuration.version} - build_image( - image_name="mongodb-kubernetes-database", + + pipeline_process_image( dockerfile_path="docker/mongodb-kubernetes-database/Dockerfile", build_configuration=build_configuration, - extra_args=args, + dockerfile_args=args, ) +def should_skip_arm64(): + """ + Determines if arm64 builds should be skipped based on environment. + Returns True if running in Evergreen pipeline as a patch. + """ + return is_running_in_evg_pipeline() and is_running_in_patch() + + +@TRACER.start_as_current_span("sign_image_in_repositories") +def sign_image_in_repositories(args: Dict[str, str], arch: str = None): + span = trace.get_current_span() + repository = args["quay_registry"] + args["ubi_suffix"] + tag = args["release_version"] + if arch: + tag = f"{tag}-{arch}" + + span.set_attribute("mck.tag", tag) + + sign_image(repository, tag) + verify_signature(repository, tag) + + def find_om_in_releases(om_version: str, releases: Dict[str, str]) -> Optional[str]: """ There are a few alternatives out there that allow for json-path or xpath-type @@ -157,7 +229,7 @@ def find_om_in_releases(om_version: str, releases: Dict[str, str]) -> Optional[s def get_om_releases() -> Dict[str, str]: - """Returns a dictionary representation of the Json document holding all the OM + """Returns a dictionary representation of the Json document holdin all the OM releases. """ ops_manager_release_archive = ( @@ -181,17 +253,16 @@ def find_om_url(om_version: str) -> str: return current_release -def build_init_om_image(build_configuration: BuildConfiguration): +def build_init_om_image(build_configuration: ImageBuildConfiguration): args = {"version": build_configuration.version} - build_image( - image_name="mongodb-kubernetes-init-ops-manager", + pipeline_process_image( dockerfile_path="docker/mongodb-kubernetes-init-ops-manager/Dockerfile", build_configuration=build_configuration, - extra_args=args, + dockerfile_args=args, ) -def build_om_image(build_configuration: BuildConfiguration): +def build_om_image(build_configuration: ImageBuildConfiguration): # Make this a parameter for the Evergreen build # https://github.com/evergreen-ci/evergreen/wiki/Parameterized-Builds om_version = os.environ.get("om_version") @@ -207,139 +278,133 @@ def build_om_image(build_configuration: BuildConfiguration): "om_download_url": om_download_url, } - build_image( - image_name="mongodb-enterprise-ops-manager-ubi", + pipeline_process_image( dockerfile_path="docker/mongodb-enterprise-ops-manager/Dockerfile", build_configuration=build_configuration, - extra_args=args, + dockerfile_args=args, ) -@TRACER.start_as_current_span("build_image_generic") -def build_image( - image_name: str, +def build_image_generic( dockerfile_path: str, - build_configuration: BuildConfiguration, + build_configuration: ImageBuildConfiguration, extra_args: dict | None = None, - build_path: str = ".", + multi_arch_args_list: list[dict] | None = None, ): """ - Build an image then (optionally) sign the result. + Build one or more platform-specific images, then (optionally) + push a manifest and sign the result. 
""" - # Tracing setup - span = trace.get_current_span() - span.set_attribute("mck.image_name", image_name) - - registry = build_configuration.base_registry - args_list = extra_args or {} - - # merge in the registry without mutating caller's dict - build_args = {**args_list, "quay_registry": registry} - - if build_args: - span.set_attribute("mck.build_args", str(build_args)) - - logger.info(f"Building {image_name}, dockerfile args: {build_args}") - logger.debug(f"Build args: {build_args}") - logger.debug(f"Building {image_name} for platforms={build_configuration.platforms}") - logger.debug(f"build image generic - registry={registry}") - # Build docker registry URI and call build_image - docker_registry = f"{build_configuration.base_registry}/{image_name}" - image_full_uri = f"{docker_registry}:{build_configuration.version}" - - execute_docker_build( - tag=image_full_uri, - dockerfile=dockerfile_path, - path=build_path, - args=build_args, - push=True, - platforms=build_configuration.platforms, - ) + registry = build_configuration.registry + image_name = build_configuration.image_name() + args_list = multi_arch_args_list or [extra_args or {}] + version = args_list[0].get("version", "") + platforms = [args.get("architecture") for args in args_list] + + for base_args in args_list: + # merge in the registry without mutating caller’s dict + build_args = {**base_args, "quay_registry": registry} + logger.debug(f"Build args: {build_args}") + + # TODO: why are we iteration over platforms here? this should be multi-arch build + for arch in platforms: + logger.debug(f"Building {image_name} for arch={arch}") + logger.debug(f"build image generic - registry={registry}") + pipeline_process_image( + dockerfile_path=dockerfile_path, + build_configuration=build_configuration, + dockerfile_args=build_args, + ) if build_configuration.sign: - logger.info("Signing image") - sign_image(docker_registry, build_configuration.version) - verify_signature(docker_registry, build_configuration.version) + sign_image(registry, version) + verify_signature(registry, version) -def build_init_appdb(build_configuration: BuildConfiguration): +def build_init_appdb(build_configuration: ImageBuildConfiguration): release = load_release_file() base_url = "https://fastdl.mongodb.org/tools/db/" mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) args = {"version": build_configuration.version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} - build_image( - image_name="mongodb-kubernetes-init-appdb", + pipeline_process_image( dockerfile_path="docker/mongodb-kubernetes-init-appdb/Dockerfile", build_configuration=build_configuration, - extra_args=args, + dockerfile_args=args, ) # TODO: nam static: remove this once static containers becomes the default -def build_init_database(build_configuration: BuildConfiguration): +def build_init_database(build_configuration: ImageBuildConfiguration): release = load_release_file() base_url = "https://fastdl.mongodb.org/tools/db/" mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) args = {"version": build_configuration.version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} - build_image( - "mongodb-kubernetes-init-database", + pipeline_process_image( "docker/mongodb-kubernetes-init-database/Dockerfile", build_configuration=build_configuration, - extra_args=args, + dockerfile_args=args, ) -def build_community_image(build_configuration: BuildConfiguration, image_type: str): +def build_readiness_probe_image(build_configuration: 
ImageBuildConfiguration): """ - Builds image for community components (readiness probe, upgrade hook). - - Args: - build_configuration: The build configuration to use - image_type: Type of image to build ("readiness-probe" or "upgrade-hook") + Builds image used for readiness probe. """ - if image_type == "readiness-probe": - image_name = "mongodb-kubernetes-readinessprobe" - dockerfile_path = "docker/mongodb-kubernetes-readinessprobe/Dockerfile" - elif image_type == "upgrade-hook": - image_name = "mongodb-kubernetes-operator-version-upgrade-post-start-hook" - dockerfile_path = "docker/mongodb-kubernetes-upgrade-hook/Dockerfile" - else: - raise ValueError(f"Unsupported community image type: {image_type}") - version = build_configuration.version golang_version = os.getenv("GOLANG_VERSION", "1.24") - extra_args = { - "version": version, - "GOLANG_VERSION": golang_version, - } - - build_image( - image_name=image_name, - dockerfile_path=dockerfile_path, + # Extract architectures from platforms for build args + architectures = [platform.split("/")[-1] for platform in build_configuration.platforms] + multi_arch_args_list = [] + + for arch in architectures: + arch_args = { + "version": version, + "GOLANG_VERSION": golang_version, + "architecture": arch, + "TARGETARCH": arch, # TODO: redundant ? + } + multi_arch_args_list.append(arch_args) + + build_image_generic( + dockerfile_path="docker/mongodb-kubernetes-readinessprobe/Dockerfile", build_configuration=build_configuration, - extra_args=extra_args, + multi_arch_args_list=multi_arch_args_list, ) -def build_readiness_probe_image(build_configuration: BuildConfiguration): +def build_upgrade_hook_image(build_configuration: ImageBuildConfiguration): """ - Builds image used for readiness probe. + Builds image used for version upgrade post-start hook. """ - build_community_image(build_configuration, "readiness-probe") + version = build_configuration.version + golang_version = os.getenv("GOLANG_VERSION", "1.24") -def build_upgrade_hook_image(build_configuration: BuildConfiguration): - """ - Builds image used for version upgrade post-start hook. - """ - build_community_image(build_configuration, "upgrade-hook") + # Extract architectures from platforms for build args + architectures = [platform.split("/")[-1] for platform in build_configuration.platforms] + multi_arch_args_list = [] + + for arch in architectures: + arch_args = { + "version": version, + "GOLANG_VERSION": golang_version, + "architecture": arch, + "TARGETARCH": arch, # TODO: redundant ? 
+ } + multi_arch_args_list.append(arch_args) + + build_image_generic( + dockerfile_path="docker/mongodb-kubernetes-upgrade-hook/Dockerfile", + build_configuration=build_configuration, + multi_arch_args_list=multi_arch_args_list, + ) def build_agent_pipeline( - build_configuration: BuildConfiguration, + build_configuration: ImageBuildConfiguration, image_version, init_database_image, mongodb_tools_url_ubi, @@ -348,6 +413,9 @@ def build_agent_pipeline( ): build_configuration_copy = copy(build_configuration) build_configuration_copy.version = image_version + print( + f"======== Building agent pipeline for version {image_version}, build configuration version: {build_configuration.version}" + ) args = { "version": image_version, "agent_version": agent_version, @@ -356,26 +424,75 @@ def build_agent_pipeline( "init_database_image": init_database_image, "mongodb_tools_url_ubi": mongodb_tools_url_ubi, "mongodb_agent_url_ubi": mongodb_agent_url_ubi, - "quay_registry": build_configuration.base_registry, + "quay_registry": build_configuration.registry, } - build_image( - image_name="mongodb-agent-ubi", + build_image_generic( dockerfile_path="docker/mongodb-agent/Dockerfile", build_configuration=build_configuration_copy, extra_args=args, ) -def build_agent_default_case(build_configuration: BuildConfiguration): +def build_multi_arch_agent_in_sonar( + build_configuration: ImageBuildConfiguration, + image_version, + tools_version, +): + """ + Creates the multi-arch non-operator suffixed version of the agent. + This is a drop-in replacement for the agent + release from MCO. + This should only be called during releases. + Which will lead to a release of the multi-arch + images to quay and ecr. + """ + + logger.info(f"building multi-arch base image for: {image_version}") + args = { + "version": image_version, + "tools_version": tools_version, + } + + arch_arm = { + "agent_distro": "amzn2_aarch64", + "tools_distro": get_tools_distro(tools_version=tools_version)["arm"], + "architecture": "arm64", + } + arch_amd = { + "agent_distro": "rhel9_x86_64", + "tools_distro": get_tools_distro(tools_version=tools_version)["amd"], + "architecture": "amd64", + } + + new_rhel_tool_version = "100.10.0" + if Version(tools_version) >= Version(new_rhel_tool_version): + arch_arm["tools_distro"] = "rhel93-aarch64" + arch_amd["tools_distro"] = "rhel93-x86_64" + + joined_args = [args | arch_amd] + + # Only include arm64 if we shouldn't skip it + if not should_skip_arm64(): + joined_args.append(args | arch_arm) + + build_image_generic( + dockerfile_path="docker/mongodb-agent-non-matrix/Dockerfile", + build_configuration=build_configuration, + multi_arch_args_list=joined_args, + ) + + +def build_agent_default_case(build_configuration: ImageBuildConfiguration): """ Build the agent only for the latest operator for patches and operator releases. 
+ See more information in the function: build_agent_on_agent_bump """ release = load_release_file() # We need to release [all agents x latest operator] on operator releases - if build_configuration.scenario == BuildScenario.RELEASE: + if build_configuration.all_agents: agent_versions_to_build = gather_all_supported_agent_versions(release) # We only need [latest agents (for each OM major version and for CM) x patch ID] for patches else: @@ -392,12 +509,12 @@ def build_agent_default_case(build_configuration: BuildConfiguration): if build_configuration.parallel_factor > 0: max_workers = build_configuration.parallel_factor with ProcessPoolExecutor(max_workers=max_workers) as executor: - logger.info(f"Running with factor of {max_workers}") - logger.info(f"======= Agent versions to build {agent_versions_to_build} =======") - for idx, agent_version in enumerate(agent_versions_to_build): + logger.info(f"running with factor of {max_workers}") + print(f"======= Versions to build {agent_versions_to_build} =======") + for agent_version in agent_versions_to_build: # We don't need to keep create and push the same image on every build. # It is enough to create and push the non-operator suffixed images only during releases to ecr and quay. - logger.info(f"======= Building Agent {agent_version} ({idx}/{len(agent_versions_to_build)})") + print(f"======= Building Agent {agent_version} =======") _build_agent_operator( agent_version, build_configuration, @@ -409,6 +526,76 @@ def build_agent_default_case(build_configuration: BuildConfiguration): queue_exception_handling(tasks_queue) +def build_agent_on_agent_bump(build_configuration: ImageBuildConfiguration): + """ + Build the agent matrix (operator version x agent version), triggered by PCT. + + We have three cases where we need to build the agent: + - e2e test runs + - operator releases + - OM/CM bumps via PCT + + We don’t require building a full matrix on e2e test runs and operator releases. + "Operator releases" and "e2e test runs" require only the latest operator x agents + + In OM/CM bumps, we release a new agent which we potentially require to release to older operators as well. + This function takes care of that. + """ + release = load_release_file() + is_release = build_configuration.is_release_scenario() + + if build_configuration.all_agents: + # We need to release [all agents x latest operator] on operator releases to make e2e tests work + # This was changed previously in https://github.com/mongodb/mongodb-kubernetes/pull/3960 + agent_versions_to_build = gather_all_supported_agent_versions(release) + else: + # we only need to release the latest images, we don't need to re-push old images, as we don't clean them up anymore. + agent_versions_to_build = gather_latest_agent_versions(release) + + legacy_agent_versions_to_build = release["supportedImages"]["mongodb-agent"]["versions"] + + tasks_queue = Queue() + max_workers = 1 + if build_configuration.parallel: + max_workers = None + if build_configuration.parallel_factor > 0: + max_workers = build_configuration.parallel_factor + with ProcessPoolExecutor(max_workers=max_workers) as executor: + logger.info(f"running with factor of {max_workers}") + + # We need to regularly push legacy agents, otherwise ecr lifecycle policy will expire them. 
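# Aside: a minimal sketch of the submit-and-collect pattern this function relies on
# (and that queue_exception_handling below checks); `jobs` as (callable, args) pairs
# is an illustrative assumption, not code from this repo:
#
#   from concurrent.futures import ProcessPoolExecutor
#   from queue import Queue
#
#   def run_jobs(jobs, max_workers=4):
#       tasks_queue = Queue()
#       with ProcessPoolExecutor(max_workers=max_workers) as executor:
#           for fn, args in jobs:
#               tasks_queue.put(executor.submit(fn, *args))
#       # the with-block waits for all futures, so failures can be collected afterwards
#       for task in tasks_queue.queue:
#           if task.exception() is not None:
#               raise task.exception()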
+ # We only need to push them once in a while to ecr, so no quay required + if not is_release: + for legacy_agent in legacy_agent_versions_to_build: + tasks_queue.put( + executor.submit( + build_multi_arch_agent_in_sonar, + build_configuration, + legacy_agent, + # we assume that all legacy agents are built using that tools version + "100.9.4", + ) + ) + + for agent_version in agent_versions_to_build: + # We don't need to keep create and push the same image on every build. + # It is enough to create and push the non-operator suffixed images only during releases to ecr and quay. + if build_configuration.all_agents: + tasks_queue.put( + executor.submit( + build_multi_arch_agent_in_sonar, + build_configuration, + agent_version[0], + agent_version[1], + ) + ) + for operator_version in get_supported_operator_versions(): + logger.info(f"Building Agent versions: {agent_version} for Operator versions: {operator_version}") + _build_agent_operator(agent_version, build_configuration, executor, operator_version, tasks_queue) + + queue_exception_handling(tasks_queue) + + def queue_exception_handling(tasks_queue): exceptions_found = False for task in tasks_queue.queue: @@ -423,7 +610,7 @@ def queue_exception_handling(tasks_queue): def _build_agent_operator( agent_version: Tuple[str, str], - build_configuration: BuildConfiguration, + build_configuration: ImageBuildConfiguration, executor: ProcessPoolExecutor, operator_version: str, tasks_queue: Queue, @@ -436,7 +623,7 @@ def _build_agent_operator( f"https://downloads.mongodb.org/tools/db/mongodb-database-tools-{tools_distro}-{tools_version}.tgz" ) mongodb_agent_url_ubi = f"https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-{agent_version[0]}.{agent_distro}.tar.gz" - init_database_image = f"{build_configuration.base_registry}/mongodb-kubernetes-init-database:{operator_version}" + init_database_image = f"{build_configuration.registry}/mongodb-kubernetes-init-database:{operator_version}" tasks_queue.put( executor.submit( diff --git a/scripts/release/build/build_info.py b/scripts/release/build/build_info.py index 37222223c..cd6405967 100644 --- a/scripts/release/build/build_info.py +++ b/scripts/release/build/build_info.py @@ -90,7 +90,11 @@ def load_build_info(scenario: BuildScenario, images = {} for name, env_data in build_info["images"].items(): - data = env_data[scenario] + data = env_data.get(scenario) + if not data: + # If no data is available for the scenario, skip this image + continue + # Only update the image_version if it is not already set in the build_info.json file image_version = data.get("version") if not image_version: @@ -100,12 +104,20 @@ def load_build_info(scenario: BuildScenario, binaries = {} for name, env_data in build_info["binaries"].items(): - data = env_data[scenario] + data = env_data.get(scenario) + if not data: + # If no data is available for the scenario, skip this binary + continue + binaries[name] = BinaryInfo(s3_store=data["s3-store"], platforms=data["platforms"], version=version) helm_charts = {} for name, env_data in build_info["helm-charts"].items(): - data = env_data[scenario] + data = env_data.get(scenario) + if not data: + # If no data is available for the scenario, skip this helm-chart + continue + helm_charts[name] = HelmChartInfo(repository=data["repository"], version=version) return BuildInfo(images=images, binaries=binaries, helm_charts=helm_charts) diff --git a/scripts/release/build/build_scenario.py
b/scripts/release/build/build_scenario.py index 9dc28b8af..a8a65bea3 100644 --- a/scripts/release/build/build_scenario.py +++ b/scripts/release/build/build_scenario.py @@ -1,5 +1,6 @@ import os from enum import StrEnum +from lib.base_logger import logger from git import Repo @@ -12,6 +13,31 @@ class BuildScenario(StrEnum): RELEASE = "release" # Official release triggered by a git tag PATCH = "patch" # CI build for a patch/pull request STAGING = "staging" # CI build from a merge to the master + DEVELOPMENT = "development" # Local build on a developer machine + + @classmethod + def infer_scenario_from_environment(cls) -> "BuildScenario": + """Infer the build scenario from environment variables.""" + git_tag = os.getenv("triggered_by_git_tag") + is_patch = os.getenv("is_patch", "false").lower() == "true" + is_evg = os.getenv("RUNNING_IN_EVG", "false").lower() == "true" + patch_id = os.getenv("version_id") + + if git_tag: + # Release scenario and the git tag will be used for promotion process only + scenario = BuildScenario.RELEASE + logger.info(f"Build scenario: {scenario} (git_tag: {git_tag})") + elif is_patch: + scenario = BuildScenario.PATCH + logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})") + elif is_evg: + scenario = BuildScenario.STAGING + logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})") + else: + scenario = BuildScenario.DEVELOPMENT + logger.info(f"Build scenario: {scenario}") + + return scenario def get_version(self, repository_path: str, changelog_sub_path: str, initial_commit_sha: str = None, initial_version: str = None) -> str: @@ -29,3 +55,6 @@ def get_version(self, repository_path: str, changelog_sub_path: str, initial_com return calculate_next_version(repo, changelog_sub_path, initial_commit_sha, initial_version) raise ValueError(f"Unknown build scenario: {self}") + + def all_agents(self) -> bool: + return self == BuildScenario.RELEASE diff --git a/scripts/release/build/image_build_configuration.py b/scripts/release/build/image_build_configuration.py new file mode 100644 index 000000000..e836690c2 --- /dev/null +++ b/scripts/release/build/image_build_configuration.py @@ -0,0 +1,25 @@ +from dataclasses import dataclass +from typing import List, Optional + +from scripts.release.build_context import BuildScenario + +SUPPORTED_PLATFORMS = ["linux/amd64", "linux/arm64"] + + +@dataclass +class ImageBuildConfiguration: + scenario: BuildScenario + version: str + registry: str + + parallel: bool = False + parallel_factor: int = 0 + platforms: Optional[List[str]] = None + sign: bool = False + all_agents: bool = False + + def is_release_scenario(self) -> bool: + return self.scenario == BuildScenario.RELEASE + + def image_name(self) -> str: + return self.registry.split('/')[-1] diff --git a/scripts/release/build_configuration.py b/scripts/release/build_configuration.py deleted file mode 100644 index 2228a6709..000000000 --- a/scripts/release/build_configuration.py +++ /dev/null @@ -1,19 +0,0 @@ -from dataclasses import dataclass -from typing import List, Optional - -from .build_context import BuildScenario - - -@dataclass -class BuildConfiguration: - scenario: BuildScenario - version: str - base_registry: str - - parallel: bool = False - parallel_factor: int = 0 - platforms: Optional[List[str]] = None - sign: bool = False - - def is_release_step_executed(self) -> bool: - return self.scenario == BuildScenario.RELEASE diff --git a/scripts/release/build_context.py b/scripts/release/build_context.py index 143693f46..db2ba104f 100644 --- 
a/scripts/release/build_context.py +++ b/scripts/release/build_context.py @@ -1,42 +1,8 @@ import os from dataclasses import dataclass -from enum import Enum from typing import Optional -from lib.base_logger import logger - - -class BuildScenario(str, Enum): - """Represents the context in which the build is running.""" - - RELEASE = "release" # Official release triggered by a git tag - PATCH = "patch" # CI build for a patch/pull request - STAGING = "staging" # CI build from a merge to the master branch - DEVELOPMENT = "development" # Local build on a developer machine - - @classmethod - def infer_scenario_from_environment(cls) -> "BuildScenario": - """Infer the build scenario from environment variables.""" - git_tag = os.getenv("triggered_by_git_tag") - is_patch = os.getenv("is_patch", "false").lower() == "true" - is_evg = os.getenv("RUNNING_IN_EVG", "false").lower() == "true" - patch_id = os.getenv("version_id") - - if git_tag: - # Release scenario and the git tag will be used for promotion process only - scenario = BuildScenario.RELEASE - logger.info(f"Build scenario: {scenario} (git_tag: {git_tag})") - elif is_patch: - scenario = BuildScenario.PATCH - logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})") - elif is_evg: - scenario = BuildScenario.STAGING - logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})") - else: - scenario = BuildScenario.DEVELOPMENT - logger.info(f"Build scenario: {scenario}") - - return scenario +from scripts.release.build.build_scenario import BuildScenario @dataclass @@ -71,7 +37,6 @@ def get_version(self) -> str: return self.git_tag if self.patch_id: return self.patch_id - # Alternatively, we can fail here if no ID is explicitly defined return "latest" def get_base_registry(self) -> str: diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py index e2a43683b..6690f9dd5 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build_images.py @@ -9,6 +9,7 @@ import docker from lib.base_logger import logger +from scripts.evergreen.release.images_signing import sign_image, verify_signature def ecr_login_boto3(region: str, account_id: str): @@ -69,7 +70,7 @@ def ensure_buildx_builder(builder_name: str = "multiarch") -> str: return builder_name -def execute_docker_build( +def build_image( tag: str, dockerfile: str, path: str, args: Dict[str, str] = {}, push: bool = True, platforms: list[str] = None ): """ @@ -82,9 +83,6 @@ def execute_docker_build( :param push: Whether to push the image after building :param platforms: List of target platforms (e.g., ["linux/amd64", "linux/arm64"]) """ - # Login to ECR before building - ecr_login_boto3(region="us-east-1", account_id="268558157000") - docker = python_on_whales.docker try: @@ -126,3 +124,34 @@ def execute_docker_build( except Exception as e: logger.error(f"Failed to build image {tag}: {e}") raise RuntimeError(f"Failed to build image {tag}: {str(e)}") + + +def process_image( + image_tag: str, + dockerfile_path: str, + dockerfile_args: Dict[str, str], + registry: str, + platforms: list[str] = None, + sign: bool = False, + build_path: str = ".", + push: bool = True, +): + # Login to ECR + ecr_login_boto3(region="us-east-1", account_id="268558157000") + + image_full_uri = f"{registry}:{image_tag}" + + # Build image with docker buildx + build_image( + tag=image_full_uri, + dockerfile=dockerfile_path, + path=build_path, + args=dockerfile_args, + push=push, + platforms=platforms, + ) + + if sign: + logger.info("Signing image") + sign_image(registry, image_tag) + 
verify_signature(registry, image_tag) diff --git a/scripts/release/optimized_operator_build.py b/scripts/release/optimized_operator_build.py new file mode 100644 index 000000000..0c5a74b78 --- /dev/null +++ b/scripts/release/optimized_operator_build.py @@ -0,0 +1,87 @@ +import os +import subprocess +import tarfile +from datetime import datetime, timedelta + +import docker +from lib.base_logger import logger +from scripts.release.build.image_build_configuration import ImageBuildConfiguration + + +def copy_into_container(client, src, dst): + """Copies a local file into a running container.""" + + os.chdir(os.path.dirname(src)) + srcname = os.path.basename(src) + with tarfile.open(src + ".tar", mode="w") as tar: + tar.add(srcname) + + name, dst = dst.split(":") + container = client.containers.get(name) + + with open(src + ".tar", "rb") as fd: + container.put_archive(os.path.dirname(dst), fd.read()) + + +def build_operator_image_fast(build_configuration: ImageBuildConfiguration) -> bool: + """This function builds the operator locally and pushes it into an existing + Docker image. This is the fastest way I could imagine we can do this.""" + + client = docker.from_env() + # image that we know is where we build the operator. + image_repo = build_configuration.registry + "/" + build_configuration.image_type + "/mongodb-kubernetes" + image_tag = "latest" + repo_tag = image_repo + ":" + image_tag + + logger.debug(f"Pulling image: {repo_tag}") + try: + image = client.images.get(repo_tag) + except docker.errors.ImageNotFound: + logger.debug("Operator image does not exist locally. Building it now") + return False + + logger.debug("Done") + too_old = datetime.now() - timedelta(hours=3) + image_timestamp = datetime.fromtimestamp( + image.history()[0]["Created"] + ) # Layer 0 is the latest added layer to this Docker image. [-1] is the FROM layer.
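# Aside: a hypothetical standalone use of copy_into_container defined above; the
# client, container name, and paths are illustrative assumptions, not values from
# this repo:
#
#   client = docker.from_env()
#   copy_into_container(
#       client,
#       "/tmp/build/mongodb-kubernetes-operator",  # assumed local binary path
#       "my-operator-container:/usr/local/bin/mongodb-kubernetes-operator",
#   )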
+ + if image_timestamp < too_old: + logger.info("Current operator image is too old, will rebuild it completely first") + return False + + container_name = "mongodb-enterprise-operator" + operator_binary_location = "/usr/local/bin/mongodb-kubernetes-operator" + try: + client.containers.get(container_name).remove() + logger.debug(f"Removed {container_name}") + except docker.errors.NotFound: + pass + + container = client.containers.run(repo_tag, name=container_name, entrypoint="sh", detach=True) + + logger.debug("Building operator with debugging symbols") + subprocess.run(["make", "manager"], check=True, stdout=subprocess.PIPE) + logger.debug("Done building the operator") + + copy_into_container( + client, + os.getcwd() + "/docker/mongodb-kubernetes-operator/content/mongodb-kubernetes-operator", + container_name + ":" + operator_binary_location, + ) + + # Commit changes on disk as a tag + container.commit( + repository=image_repo, + tag=image_tag, + ) + # Stop this container so we can use it next time + container.stop() + container.remove() + + logger.info("Pushing operator to {}:{}".format(image_repo, image_tag)) + client.images.push( + repository=image_repo, + tag=image_tag, + ) + return True diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index 3f7b9473d..923bc02c7 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -1,6 +1,5 @@ import argparse import os -import sys from typing import Callable, Dict from opentelemetry import context, trace @@ -18,6 +17,7 @@ from lib.base_logger import logger from scripts.release.atomic_pipeline import ( build_agent_default_case, + build_agent_on_agent_bump, build_database_image, build_init_appdb, build_init_database, @@ -25,42 +25,44 @@ build_mco_tests_image, build_om_image, build_operator_image, + build_operator_image_patch, build_readiness_probe_image, build_tests_image, build_upgrade_hook_image, ) -from scripts.release.build_configuration import BuildConfiguration +from scripts.release.build.build_info import load_build_info +from scripts.release.build.image_build_configuration import ( + SUPPORTED_PLATFORMS, + ImageBuildConfiguration, +) from scripts.release.build_context import ( - BuildContext, BuildScenario, ) """ -The goal of main.py, build_configuration.py and build_context.py is to provide a single source of truth for the build +The goal of main.py, image_build_configuration.py and build_context.py is to provide a single source of truth for the build configuration. All parameters that depend on the build environment (local dev, evg, etc.) should be resolved here and not in the pipeline.
""" -SUPPORTED_PLATFORMS = ["linux/amd64", "linux/arm64"] - def get_builder_function_for_image_name() -> Dict[str, Callable]: """Returns a dictionary of image names that can be built.""" image_builders = { - "test": build_tests_image, - "operator": build_operator_image, - "mco-test": build_mco_tests_image, - "readiness-probe": build_readiness_probe_image, - "upgrade-hook": build_upgrade_hook_image, - "database": build_database_image, + "meko-tests": build_tests_image, # working + "operator": build_operator_image, # working + "mco-tests": build_mco_tests_image, # working + "readiness-probe": build_readiness_probe_image, # working, but still using single arch build + "upgrade-hook": build_upgrade_hook_image, # working, but still using single arch build + "operator-quick": build_operator_image_patch, # TODO: remove this image, it is not used anymore + "database": build_database_image, # working + "agent-pct": build_agent_on_agent_bump, "agent": build_agent_default_case, - # # Init images - "init-appdb": build_init_appdb, - "init-database": build_init_database, - "init-ops-manager": build_init_om_image, - # + "init-appdb": build_init_appdb, # working + "init-database": build_init_database, # working + "init-ops-manager": build_init_om_image, # working # Ops Manager image "ops-manager": build_om_image, } @@ -68,11 +70,57 @@ def get_builder_function_for_image_name() -> Dict[str, Callable]: return image_builders -def build_image(image_name: str, build_configuration: BuildConfiguration): +def build_image(image_name: str, build_configuration: ImageBuildConfiguration): """Builds one of the supported images by its name.""" + if image_name not in get_builder_function_for_image_name(): + raise ValueError( + f"Image '{image_name}' is not supported. Supported images: {', '.join(get_builder_function_for_image_name().keys())}" + ) get_builder_function_for_image_name()[image_name](build_configuration) +def image_build_config_from_args(args) -> ImageBuildConfiguration: + image = args.image + + build_scenario = BuildScenario(args.scenario) or BuildScenario.infer_scenario_from_environment() + + build_info = load_build_info(build_scenario) + image_build_info = build_info.images.get(image) + if not image_build_info: + raise ValueError(f"Image '{image}' is not defined in the build info for scenario '{build_scenario}'") + + # Resolve final values with overrides + # TODO: cover versions for agents and OM images + version = args.version or image_build_info.version + registry = args.registry or image_build_info.repository + platforms = get_platforms_from_arg(args) or image_build_info.platforms + # TODO: add sign to build_info.json + sign = args.sign + # TODO: remove "all_agents" from context and environment variables support (not needed anymore) + all_agents = args.all_agents or build_scenario.all_agents() + + return ImageBuildConfiguration( + scenario=build_scenario, + version=version, + registry=registry, + parallel=args.parallel, + platforms=platforms, + sign=sign, + all_agents=all_agents, + parallel_factor=args.parallel_factor, + ) + + +def get_platforms_from_arg(args): + """Parse and validate the --platform argument""" + platforms = [p.strip() for p in args.platform.split(",")] + if any(p not in SUPPORTED_PLATFORMS for p in platforms): + raise ValueError( + f"Unsupported platform in --platforms '{args.platform}'. 
Supported platforms: {', '.join(SUPPORTED_PLATFORMS)}" + ) + return platforms + + def _setup_tracing(): trace_id = os.environ.get("otel_trace_id") parent_id = os.environ.get("otel_parent_id") @@ -105,13 +153,11 @@ def _setup_tracing(): def main(): - _setup_tracing() parser = argparse.ArgumentParser(description="Build container images.") parser.add_argument("image", help="Image to build.") # Required parser.add_argument("--parallel", action="store_true", help="Build images in parallel.") parser.add_argument("--debug", action="store_true", help="Enable debug logging.") - parser.add_argument("--sign", action="store_true", help="Sign images.") parser.add_argument( "--scenario", choices=list(BuildScenario), @@ -120,8 +166,7 @@ def main(): # Override arguments for build context and configuration parser.add_argument( "--platform", - default="linux/amd64", - help="Target platforms for multi-arch builds (comma-separated). Example: linux/amd64,linux/arm64. Defaults to linux/amd64.", + help="Override the platforms instead of resolving from build scenario", ) parser.add_argument( "--version", @@ -131,7 +176,16 @@ def main(): "--registry", help="Override the base registry instead of resolving from build scenario", ) - # For agent builds + parser.add_argument( + "--sign", action="store_true", help="Force signing instead of resolving condition from build scenario" + ) + + # Agent specific arguments + parser.add_argument( + "--all-agents", + action="store_true", + help="Build all agent variants instead of only the latest", + ) parser.add_argument( "--parallel-factor", default=0, @@ -141,48 +195,12 @@ def main(): args = parser.parse_args() - build_config = build_config_from_args(args) + build_config = image_build_config_from_args(args) logger.info(f"Building image: {args.image}") logger.info(f"Build configuration: {build_config}") build_image(args.image, build_config) -def build_config_from_args(args): - # Validate that the image name is supported - supported_images = get_builder_function_for_image_name().keys() - if args.image not in supported_images: - logger.error(f"Unsupported image '{args.image}'. Supported images: {', '.join(supported_images)}") - sys.exit(1) - - # Parse platform argument (comma-separated) - platforms = [p.strip() for p in args.platform.split(",")] - if any(p not in SUPPORTED_PLATFORMS for p in platforms): - logger.error( - f"Unsupported platform in '{args.platform}'. 
Supported platforms: {', '.join(SUPPORTED_PLATFORMS)}" - ) - sys.exit(1) - - # Centralized configuration management with overrides - build_scenario = args.scenario or BuildScenario.infer_scenario_from_environment() - build_context = BuildContext.from_scenario(build_scenario) - - # Resolve final values with overrides - scenario = args.scenario or build_context.scenario - version = args.version or build_context.get_version() - registry = args.registry or build_context.get_base_registry() - sign = args.sign or build_context.signing_enabled - - return BuildConfiguration( - scenario=scenario, - version=version, - base_registry=registry, - parallel=args.parallel, - platforms=platforms, - sign=sign, - parallel_factor=args.parallel_factor, - ) - - if __name__ == "__main__": main() From 8a4e4b7180577bbed371fde4aa83cc454fb5eab4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Wed, 6 Aug 2025 13:39:24 +0200 Subject: [PATCH 093/164] merge redesign-pipeline branch --- scripts/release/atomic_pipeline.py | 224 +++--------------- scripts/release/build/build_scenario.py | 17 +- .../build/image_build_configuration.py | 2 +- scripts/release/build_context.py | 48 ---- scripts/release/pipeline_main.py | 3 +- 5 files changed, 38 insertions(+), 256 deletions(-) delete mode 100644 scripts/release/build_context.py diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 0f2caedb5..e0d424602 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -9,7 +9,7 @@ from concurrent.futures import ProcessPoolExecutor from copy import copy from queue import Queue -from typing import Dict, List, Optional, Tuple, Union +from typing import Dict, List, Optional, Tuple import requests import semver @@ -17,30 +17,15 @@ from packaging.version import Version from lib.base_logger import logger -from scripts.evergreen.release.agent_matrix import ( - get_supported_operator_versions, -) from scripts.evergreen.release.images_signing import ( sign_image, verify_signature, ) from scripts.release.build.image_build_configuration import ImageBuildConfiguration - from .build_images import process_image from .optimized_operator_build import build_operator_image_fast TRACER = trace.get_tracer("evergreen-agent") -DEFAULT_NAMESPACE = "default" - - -def make_list_of_str(value: Union[None, str, List[str]]) -> List[str]: - if value is None: - return [] - - if isinstance(value, str): - return [e.strip() for e in value.split(",")] - - return value def get_tools_distro(tools_version: str) -> Dict[str, str]: @@ -55,11 +40,6 @@ def is_running_in_evg_pipeline(): return os.getenv("RUNNING_IN_EVG", "") == "true" -def is_running_in_patch(): - is_patch = os.environ.get("is_patch") - return is_patch is not None and is_patch.lower() == "true" - - def load_release_file() -> Dict: with open("release.json") as release: return json.load(release) @@ -190,14 +170,6 @@ def build_database_image(build_configuration: ImageBuildConfiguration): ) -def should_skip_arm64(): - """ - Determines if arm64 builds should be skipped based on environment. - Returns True if running in Evergreen pipeline as a patch. 
- """ - return is_running_in_evg_pipeline() and is_running_in_patch() - - @TRACER.start_as_current_span("sign_image_in_repositories") def sign_image_in_repositories(args: Dict[str, str], arch: str = None): span = trace.get_current_span() @@ -289,7 +261,6 @@ def build_image_generic( dockerfile_path: str, build_configuration: ImageBuildConfiguration, extra_args: dict | None = None, - multi_arch_args_list: list[dict] | None = None, ): """ Build one or more platform-specific images, then (optionally) @@ -298,24 +269,20 @@ def build_image_generic( registry = build_configuration.registry image_name = build_configuration.image_name() - args_list = multi_arch_args_list or [extra_args or {}] - version = args_list[0].get("version", "") - platforms = [args.get("architecture") for args in args_list] - - for base_args in args_list: - # merge in the registry without mutating caller’s dict - build_args = {**base_args, "quay_registry": registry} - logger.debug(f"Build args: {build_args}") - - # TODO: why are we iteration over platforms here? this should be multi-arch build - for arch in platforms: - logger.debug(f"Building {image_name} for arch={arch}") - logger.debug(f"build image generic - registry={registry}") - pipeline_process_image( - dockerfile_path=dockerfile_path, - build_configuration=build_configuration, - dockerfile_args=build_args, - ) + args_list = extra_args or {} + version = args_list.get("version", "") + + # merge in the registry without mutating caller’s dict + build_args = {**args_list, "quay_registry": registry} + logger.debug(f"Build args: {build_args}") + + logger.debug(f"Building {image_name} for platforms={build_configuration.platforms}") + logger.debug(f"build image generic - registry={registry}") + pipeline_process_image( + dockerfile_path=dockerfile_path, + build_configuration=build_configuration, + dockerfile_args=build_args, + ) if build_configuration.sign: sign_image(registry, version) @@ -352,26 +319,17 @@ def build_readiness_probe_image(build_configuration: ImageBuildConfiguration): Builds image used for readiness probe. """ - version = build_configuration.version golang_version = os.getenv("GOLANG_VERSION", "1.24") - # Extract architectures from platforms for build args - architectures = [platform.split("/")[-1] for platform in build_configuration.platforms] - multi_arch_args_list = [] - - for arch in architectures: - arch_args = { - "version": version, - "GOLANG_VERSION": golang_version, - "architecture": arch, - "TARGETARCH": arch, # TODO: redundant ? - } - multi_arch_args_list.append(arch_args) + extra_args = { + "version": build_configuration.version, + "GOLANG_VERSION": golang_version, + } build_image_generic( dockerfile_path="docker/mongodb-kubernetes-readinessprobe/Dockerfile", build_configuration=build_configuration, - multi_arch_args_list=multi_arch_args_list, + extra_args=extra_args, ) @@ -380,26 +338,17 @@ def build_upgrade_hook_image(build_configuration: ImageBuildConfiguration): Builds image used for version upgrade post-start hook. """ - version = build_configuration.version golang_version = os.getenv("GOLANG_VERSION", "1.24") - # Extract architectures from platforms for build args - architectures = [platform.split("/")[-1] for platform in build_configuration.platforms] - multi_arch_args_list = [] - - for arch in architectures: - arch_args = { - "version": version, - "GOLANG_VERSION": golang_version, - "architecture": arch, - "TARGETARCH": arch, # TODO: redundant ? 
- } - multi_arch_args_list.append(arch_args) + extra_args = { + "version": build_configuration.version, + "GOLANG_VERSION": golang_version, + } build_image_generic( dockerfile_path="docker/mongodb-kubernetes-upgrade-hook/Dockerfile", build_configuration=build_configuration, - multi_arch_args_list=multi_arch_args_list, + extra_args=extra_args, ) @@ -434,55 +383,6 @@ def build_agent_pipeline( ) -def build_multi_arch_agent_in_sonar( - build_configuration: ImageBuildConfiguration, - image_version, - tools_version, -): - """ - Creates the multi-arch non-operator suffixed version of the agent. - This is a drop-in replacement for the agent - release from MCO. - This should only be called during releases. - Which will lead to a release of the multi-arch - images to quay and ecr. - """ - - logger.info(f"building multi-arch base image for: {image_version}") - args = { - "version": image_version, - "tools_version": tools_version, - } - - arch_arm = { - "agent_distro": "amzn2_aarch64", - "tools_distro": get_tools_distro(tools_version=tools_version)["arm"], - "architecture": "arm64", - } - arch_amd = { - "agent_distro": "rhel9_x86_64", - "tools_distro": get_tools_distro(tools_version=tools_version)["amd"], - "architecture": "amd64", - } - - new_rhel_tool_version = "100.10.0" - if Version(tools_version) >= Version(new_rhel_tool_version): - arch_arm["tools_distro"] = "rhel93-aarch64" - arch_amd["tools_distro"] = "rhel93-x86_64" - - joined_args = [args | arch_amd] - - # Only include arm64 if we shouldn't skip it - if not should_skip_arm64(): - joined_args.append(args | arch_arm) - - build_image_generic( - dockerfile_path="docker/mongodb-agent-non-matrix/Dockerfile", - build_configuration=build_configuration, - multi_arch_args_list=joined_args, - ) - - def build_agent_default_case(build_configuration: ImageBuildConfiguration): """ Build the agent only for the latest operator for patches and operator releases. @@ -511,10 +411,10 @@ def build_agent_default_case(build_configuration: ImageBuildConfiguration): with ProcessPoolExecutor(max_workers=max_workers) as executor: logger.info(f"running with factor of {max_workers}") print(f"======= Versions to build {agent_versions_to_build} =======") - for agent_version in agent_versions_to_build: + for idx, agent_version in enumerate(agent_versions_to_build): # We don't need to keep create and push the same image on every build. # It is enough to create and push the non-operator suffixed images only during releases to ecr and quay. - print(f"======= Building Agent {agent_version} =======") + print(f"======= Building Agent {agent_version} ({idx}/{len(agent_versions_to_build)})") _build_agent_operator( agent_version, build_configuration, @@ -526,76 +426,6 @@ def build_agent_default_case(build_configuration: ImageBuildConfiguration): queue_exception_handling(tasks_queue) -def build_agent_on_agent_bump(build_configuration: ImageBuildConfiguration): - """ - Build the agent matrix (operator version x agent version), triggered by PCT. - - We have three cases where we need to build the agent: - - e2e test runs - - operator releases - - OM/CM bumps via PCT - - We don’t require building a full matrix on e2e test runs and operator releases. - "Operator releases" and "e2e test runs" require only the latest operator x agents - - In OM/CM bumps, we release a new agent which we potentially require to release to older operators as well. - This function takes care of that. 
- """ - release = load_release_file() - is_release = build_configuration.is_release_scenario() - - if build_configuration.all_agents: - # We need to release [all agents x latest operator] on operator releases to make e2e tests work - # This was changed previously in https://github.com/mongodb/mongodb-kubernetes/pull/3960 - agent_versions_to_build = gather_all_supported_agent_versions(release) - else: - # we only need to release the latest images, we don't need to re-push old images, as we don't clean them up anymore. - agent_versions_to_build = gather_latest_agent_versions(release) - - legacy_agent_versions_to_build = release["supportedImages"]["mongodb-agent"]["versions"] - - tasks_queue = Queue() - max_workers = 1 - if build_configuration.parallel: - max_workers = None - if build_configuration.parallel_factor > 0: - max_workers = build_configuration.parallel_factor - with ProcessPoolExecutor(max_workers=max_workers) as executor: - logger.info(f"running with factor of {max_workers}") - - # We need to regularly push legacy agents, otherwise ecr lifecycle policy will expire them. - # We only need to push them once in a while to ecr, so no quay required - if not is_release: - for legacy_agent in legacy_agent_versions_to_build: - tasks_queue.put( - executor.submit( - build_multi_arch_agent_in_sonar, - build_configuration, - legacy_agent, - # we assume that all legacy agents are build using that tools version - "100.9.4", - ) - ) - - for agent_version in agent_versions_to_build: - # We don't need to keep create and push the same image on every build. - # It is enough to create and push the non-operator suffixed images only during releases to ecr and quay. - if build_configuration.all_agents: - tasks_queue.put( - executor.submit( - build_multi_arch_agent_in_sonar, - build_configuration, - agent_version[0], - agent_version[1], - ) - ) - for operator_version in get_supported_operator_versions(): - logger.info(f"Building Agent versions: {agent_version} for Operator versions: {operator_version}") - _build_agent_operator(agent_version, build_configuration, executor, operator_version, tasks_queue) - - queue_exception_handling(tasks_queue) - - def queue_exception_handling(tasks_queue): exceptions_found = False for task in tasks_queue.queue: diff --git a/scripts/release/build/build_scenario.py b/scripts/release/build/build_scenario.py index a8a65bea3..cc88ebdab 100644 --- a/scripts/release/build/build_scenario.py +++ b/scripts/release/build/build_scenario.py @@ -27,12 +27,13 @@ def infer_scenario_from_environment(cls) -> "BuildScenario": # Release scenario and the git tag will be used for promotion process only scenario = BuildScenario.RELEASE logger.info(f"Build scenario: {scenario} (git_tag: {git_tag})") - elif is_patch: + elif is_patch or is_evg: scenario = BuildScenario.PATCH logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})") - elif is_evg: - scenario = BuildScenario.STAGING - logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})") + # TODO: Uncomment the following lines when starting to work on staging builds + # elif is_evg: + # scenario = BuildScenario.STAGING + # logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})") else: scenario = BuildScenario.DEVELOPMENT logger.info(f"Build scenario: {scenario}") @@ -45,10 +46,10 @@ def get_version(self, repository_path: str, changelog_sub_path: str, initial_com match self: case BuildScenario.PATCH: - build_id = os.environ["BUILD_ID"] - if not build_id: - raise ValueError(f"BUILD_ID environment variable is not set for 
`{self}` build scenario") - return build_id + patch_id = os.getenv("version_id") + if not patch_id: + raise ValueError(f"version_id environment variable is not set for `{self}` build scenario") + return patch_id case BuildScenario.STAGING: return repo.head.object.hexsha[:COMMIT_SHA_LENGTH] case BuildScenario.RELEASE: diff --git a/scripts/release/build/image_build_configuration.py b/scripts/release/build/image_build_configuration.py index e836690c2..750a69c76 100644 --- a/scripts/release/build/image_build_configuration.py +++ b/scripts/release/build/image_build_configuration.py @@ -1,7 +1,7 @@ from dataclasses import dataclass from typing import List, Optional -from scripts.release.build_context import BuildScenario +from scripts.release.build.build_scenario import BuildScenario SUPPORTED_PLATFORMS = ["linux/amd64", "linux/arm64"] diff --git a/scripts/release/build_context.py b/scripts/release/build_context.py deleted file mode 100644 index db2ba104f..000000000 --- a/scripts/release/build_context.py +++ /dev/null @@ -1,48 +0,0 @@ -import os -from dataclasses import dataclass -from typing import Optional - -from scripts.release.build.build_scenario import BuildScenario - - -@dataclass -class BuildContext: - """Define build parameters based on the build scenario.""" - - scenario: BuildScenario - git_tag: Optional[str] = None - patch_id: Optional[str] = None - signing_enabled: bool = False - multi_arch: bool = True - version: Optional[str] = None - - @classmethod - def from_scenario(cls, scenario: BuildScenario) -> "BuildContext": - """Create build context from a given scenario.""" - git_tag = os.getenv("triggered_by_git_tag") - patch_id = os.getenv("version_id") - signing_enabled = scenario == BuildScenario.RELEASE - - return cls( - scenario=scenario, - git_tag=git_tag, - patch_id=patch_id, - signing_enabled=signing_enabled, - version=git_tag or patch_id, - ) - - def get_version(self) -> str: - """Gets the version that will be used to tag the images.""" - if self.scenario == BuildScenario.RELEASE: - return self.git_tag - if self.patch_id: - return self.patch_id - return "latest" - - def get_base_registry(self) -> str: - """Get the base registry URL for the current scenario.""" - # TODO CLOUDP-335471: when working on the promotion process, use the prod registry variable in RELEASE scenario - if self.scenario == BuildScenario.STAGING: - return os.environ.get("STAGING_REPO_URL") - else: - return os.environ.get("BASE_REPO_URL") diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index 923bc02c7..94d083adf 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -17,7 +17,6 @@ from lib.base_logger import logger from scripts.release.atomic_pipeline import ( build_agent_default_case, - build_agent_on_agent_bump, build_database_image, build_init_appdb, build_init_database, @@ -35,7 +34,7 @@ SUPPORTED_PLATFORMS, ImageBuildConfiguration, ) -from scripts.release.build_context import ( +from scripts.release.build.build_scenario import ( BuildScenario, ) From 5bfacf66a0843660d59faf755791e96b477d30ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Wed, 6 Aug 2025 14:47:17 +0200 Subject: [PATCH 094/164] Fixes after merging remote branch --- scripts/release/atomic_pipeline.py | 95 ++++++------------- .../image_build_process.py} | 41 +++----- scripts/release/pipeline_main.py | 6 +- 3 files changed, 46 insertions(+), 96 deletions(-) rename scripts/release/{build_images.py => build/image_build_process.py} (82%) diff --git 
a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py
index e0d424602..479f04039 100755
--- a/scripts/release/atomic_pipeline.py
+++ b/scripts/release/atomic_pipeline.py
@@ -1,8 +1,7 @@
 #!/usr/bin/env python3
-"""This pipeline script knows about the details of our Docker images
-and where to fetch and calculate parameters. It uses Sonar.py
-to produce the final images."""
+"""This atomic_pipeline script knows about the details of our Docker images,
+where to fetch artifacts from, and how to calculate build parameters."""
 import json
 import os
 import shutil
@@ -22,7 +21,8 @@
     verify_signature,
 )
 from scripts.release.build.image_build_configuration import ImageBuildConfiguration
-from .build_images import process_image
+from scripts.release.build.image_build_process import build_image
+
 from .optimized_operator_build import build_operator_image_fast
 
 TRACER = trace.get_tracer("evergreen-agent")
@@ -36,10 +36,6 @@ def get_tools_distro(tools_version: str) -> Dict[str, str]:
     return default_distro
 
 
-def is_running_in_evg_pipeline():
-    return os.getenv("RUNNING_IN_EVG", "") == "true"
-
-
 def load_release_file() -> Dict:
     with open("release.json") as release:
         return json.load(release)
@@ -59,27 +55,37 @@ def pipeline_process_image(
     if dockerfile_args:
         span.set_attribute("mck.build_args", str(dockerfile_args))
 
-    logger.info(f"Dockerfile args: {dockerfile_args}, for image: {image_name}")
-
     if not dockerfile_args:
         dockerfile_args = {}
-    logger.debug(f"Build args: {dockerfile_args}")
 
-    process_image(
+    logger.info(f"Dockerfile args: {dockerfile_args}, for image: {image_name}")
+
+    build_image(
         image_tag=build_configuration.version,
         dockerfile_path=dockerfile_path,
         dockerfile_args=dockerfile_args,
         registry=build_configuration.registry,
         platforms=build_configuration.platforms,
-        sign=build_configuration.sign,
         build_path=build_path,
     )
 
+    if build_configuration.sign:
+        pipeline_sign_image(
+            registry=build_configuration.registry,
+            version=build_configuration.version,
+        )
+
+
+@TRACER.start_as_current_span("sign_image_in_repositories")
+def pipeline_sign_image(registry: str, version: str):
+    logger.info("Signing image")
+    sign_image(registry, version)
+    verify_signature(registry, version)
+
 
 def build_tests_image(build_configuration: ImageBuildConfiguration):
     """
     Builds image used to run tests.
     """
-    image_name = "mongodb-kubernetes-tests"
 
     # helm directory needs to be copied over to the tests docker context.
     helm_src = "helm_chart"
@@ -170,20 +176,6 @@ def build_database_image(build_configuration: ImageBuildConfiguration):
     )
 
 
-@TRACER.start_as_current_span("sign_image_in_repositories")
-def sign_image_in_repositories(args: Dict[str, str], arch: str = None):
-    span = trace.get_current_span()
-    repository = args["quay_registry"] + args["ubi_suffix"]
-    tag = args["release_version"]
-    if arch:
-        tag = f"{tag}-{arch}"
-
-    span.set_attribute("mck.tag", tag)
-
-    sign_image(repository, tag)
-    verify_signature(repository, tag)
-
-
 def find_om_in_releases(om_version: str, releases: Dict[str, str]) -> Optional[str]:
     """
     There are a few alternatives out there that allow for json-path or xpath-type
@@ -257,43 +249,12 @@ def build_om_image(build_configuration: ImageBuildConfiguration):
     )
 
 
-def build_image_generic(
-    dockerfile_path: str,
-    build_configuration: ImageBuildConfiguration,
-    extra_args: dict | None = None,
-):
-    """
-    Build one or more platform-specific images, then (optionally)
-    push a manifest and sign the result.
- """ - - registry = build_configuration.registry - image_name = build_configuration.image_name() - args_list = extra_args or {} - version = args_list.get("version", "") - - # merge in the registry without mutating caller’s dict - build_args = {**args_list, "quay_registry": registry} - logger.debug(f"Build args: {build_args}") - - logger.debug(f"Building {image_name} for platforms={build_configuration.platforms}") - logger.debug(f"build image generic - registry={registry}") - pipeline_process_image( - dockerfile_path=dockerfile_path, - build_configuration=build_configuration, - dockerfile_args=build_args, - ) - - if build_configuration.sign: - sign_image(registry, version) - verify_signature(registry, version) - - def build_init_appdb(build_configuration: ImageBuildConfiguration): release = load_release_file() base_url = "https://fastdl.mongodb.org/tools/db/" mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) args = {"version": build_configuration.version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} + pipeline_process_image( dockerfile_path="docker/mongodb-kubernetes-init-appdb/Dockerfile", build_configuration=build_configuration, @@ -326,10 +287,10 @@ def build_readiness_probe_image(build_configuration: ImageBuildConfiguration): "GOLANG_VERSION": golang_version, } - build_image_generic( + pipeline_process_image( dockerfile_path="docker/mongodb-kubernetes-readinessprobe/Dockerfile", build_configuration=build_configuration, - extra_args=extra_args, + dockerfile_args=extra_args, ) @@ -345,10 +306,10 @@ def build_upgrade_hook_image(build_configuration: ImageBuildConfiguration): "GOLANG_VERSION": golang_version, } - build_image_generic( + pipeline_process_image( dockerfile_path="docker/mongodb-kubernetes-upgrade-hook/Dockerfile", build_configuration=build_configuration, - extra_args=extra_args, + dockerfile_args=extra_args, ) @@ -373,13 +334,13 @@ def build_agent_pipeline( "init_database_image": init_database_image, "mongodb_tools_url_ubi": mongodb_tools_url_ubi, "mongodb_agent_url_ubi": mongodb_agent_url_ubi, - "quay_registry": build_configuration.registry, + "quay_registry": build_configuration_copy.registry, } - build_image_generic( + pipeline_process_image( dockerfile_path="docker/mongodb-agent/Dockerfile", build_configuration=build_configuration_copy, - extra_args=args, + dockerfile_args=args, ) diff --git a/scripts/release/build_images.py b/scripts/release/build/image_build_process.py similarity index 82% rename from scripts/release/build_images.py rename to scripts/release/build/image_build_process.py index 6690f9dd5..cf474ee3b 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build/image_build_process.py @@ -9,7 +9,6 @@ import docker from lib.base_logger import logger -from scripts.evergreen.release.images_signing import sign_image, verify_signature def ecr_login_boto3(region: str, account_id: str): @@ -47,16 +46,16 @@ def ensure_buildx_builder(builder_name: str = "multiarch") -> str: :return: The builder name that was created or reused """ - docker = python_on_whales.docker + docker_cmd = python_on_whales.docker - existing_builders = docker.buildx.list() + existing_builders = docker_cmd.buildx.list() if any(b.name == builder_name for b in existing_builders): logger.info(f"Builder '{builder_name}' already exists – reusing it.") - docker.buildx.use(builder_name) + docker_cmd.buildx.use(builder_name) return builder_name try: - docker.buildx.create( + docker_cmd.buildx.create( name=builder_name, driver="docker-container", use=True, 
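
Note on `ensure_buildx_builder`: the list-then-create sequence above is check-then-act, so two build processes starting at the same time can both conclude the builder is missing and race on `buildx create` — this is why a later patch in this series ("Disable concurrent builds") pins `max_workers = 1` with a TODO about proper synchronization for concurrent builder creation. A minimal sketch of one way to serialize creation across processes with an advisory file lock; the wrapper name and lock path are illustrative assumptions, not part of this series, and `fcntl` limits it to Unix-like hosts:

import fcntl

import python_on_whales


def ensure_buildx_builder_locked(builder_name: str = "multiarch") -> str:
    # Hypothetical wrapper: hold an exclusive advisory lock around the whole
    # check-then-create sequence so concurrent workers cannot race on creation.
    with open(f"/tmp/buildx-{builder_name}.lock", "w") as lock_file:
        fcntl.flock(lock_file, fcntl.LOCK_EX)  # blocks; released when the file closes
        docker_cmd = python_on_whales.docker
        if any(b.name == builder_name for b in docker_cmd.buildx.list()):
            docker_cmd.buildx.use(builder_name)
        else:
            docker_cmd.buildx.create(name=builder_name, driver="docker-container", use=True)
    return builder_name
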
@@ -70,8 +69,8 @@ def ensure_buildx_builder(builder_name: str = "multiarch") -> str: return builder_name -def build_image( - tag: str, dockerfile: str, path: str, args: Dict[str, str] = {}, push: bool = True, platforms: list[str] = None +def docker_build_image( + tag: str, dockerfile: str, path: str, args: Dict[str, str], push: bool, platforms: list[str] ): """ Build a Docker image using python_on_whales and Docker Buildx for multi-architecture support. @@ -83,15 +82,11 @@ def build_image( :param push: Whether to push the image after building :param platforms: List of target platforms (e.g., ["linux/amd64", "linux/arm64"]) """ - docker = python_on_whales.docker + docker_cmd = python_on_whales.docker try: # Convert build args to the format expected by python_on_whales - build_args = {k: str(v) for k, v in args.items()} if args else {} - - # Set default platforms if not specified - if platforms is None: - platforms = ["linux/amd64"] + build_args = {k: str(v) for k, v in args.items()} logger.info(f"Building image: {tag}") logger.info(f"Platforms: {platforms}") @@ -107,9 +102,10 @@ def build_image( builder_name = ensure_buildx_builder("multiarch") # Build the image using buildx - docker.buildx.build( + docker_cmd.buildx.build( context_path=path, file=dockerfile, + # TODO: add tag for release builds (OLM immutable tag) tags=[tag], platforms=platforms, builder=builder_name, @@ -126,15 +122,13 @@ def build_image( raise RuntimeError(f"Failed to build image {tag}: {str(e)}") -def process_image( +def build_image( image_tag: str, dockerfile_path: str, dockerfile_args: Dict[str, str], registry: str, - platforms: list[str] = None, - sign: bool = False, - build_path: str = ".", - push: bool = True, + platforms: list[str], + build_path: str, ): # Login to ECR ecr_login_boto3(region="us-east-1", account_id="268558157000") @@ -142,16 +136,11 @@ def process_image( image_full_uri = f"{registry}:{image_tag}" # Build image with docker buildx - build_image( + docker_build_image( tag=image_full_uri, dockerfile=dockerfile_path, path=build_path, args=dockerfile_args, - push=push, + push=True, platforms=platforms, ) - - if sign: - logger.info("Signing image") - sign_image(docker_registry, image_tag) - verify_signature(docker_registry, image_tag) diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index 94d083adf..a4553813e 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -30,13 +30,13 @@ build_upgrade_hook_image, ) from scripts.release.build.build_info import load_build_info +from scripts.release.build.build_scenario import ( + BuildScenario, +) from scripts.release.build.image_build_configuration import ( SUPPORTED_PLATFORMS, ImageBuildConfiguration, ) -from scripts.release.build.build_scenario import ( - BuildScenario, -) """ The goal of main.py, image_build_configuration.py and build_context.py is to provide a single source of truth for the build From 9733b45eeb107b719f87d2c78ddbfb183174bb7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Wed, 6 Aug 2025 15:12:30 +0200 Subject: [PATCH 095/164] Add sign option + change staging registries --- build_info.json | 38 +++++-- scripts/release/build/build_info.py | 31 ++++-- scripts/release/build/build_info_test.py | 125 ++++++++++++++++------- scripts/release/build/conftest.py | 10 +- scripts/release/conftest.py | 6 +- scripts/release/pipeline_main.py | 3 +- 6 files changed, 147 insertions(+), 66 deletions(-) diff --git a/build_info.json b/build_info.json index 9f7d173b8..f647424e0 
100644 --- a/build_info.json +++ b/build_info.json @@ -8,13 +8,15 @@ ] }, "staging": { - "repository": "quay.io/mongodb/mongodb-kubernetes-stg", + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes", "platforms": [ "linux/arm64", "linux/amd64" ] }, "release": { + "sign": true, "repository": "quay.io/mongodb/mongodb-kubernetes", "platforms": [ "linux/arm64", @@ -30,13 +32,15 @@ ] }, "staging": { - "repository": "quay.io/mongodb/mongodb-kubernetes-init-database-stg", + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-database", "platforms": [ "linux/arm64", "linux/amd64" ] }, "release": { + "sign": true, "repository": "quay.io/mongodb/mongodb-kubernetes-init-database", "platforms": [ "linux/arm64", @@ -52,13 +56,15 @@ ] }, "staging": { - "repository": "quay.io/mongodb/mongodb-kubernetes-init-appdb-stg", + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-appdb", "platforms": [ "linux/arm64", "linux/amd64" ] }, "release": { + "sign": true, "repository": "quay.io/mongodb/mongodb-kubernetes-init-appdb", "platforms": [ "linux/arm64", @@ -74,13 +80,15 @@ ] }, "staging": { - "repository": "quay.io/mongodb/mongodb-kubernetes-init-ops-manager-stg", + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-ops-manager", "platforms": [ "linux/arm64", "linux/amd64" ] }, "release": { + "sign": true, "repository": "quay.io/mongodb/mongodb-kubernetes-init-ops-manager", "platforms": [ "linux/arm64", @@ -96,13 +104,15 @@ ] }, "staging": { - "repository": "quay.io/mongodb/mongodb-kubernetes-database-stg", + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-database", "platforms": [ "linux/arm64", "linux/amd64" ] }, "release": { + "sign": true, "repository": "quay.io/mongodb/mongodb-kubernetes-database", "platforms": [ "linux/arm64", @@ -118,7 +128,7 @@ ] }, "staging": { - "repository": "quay.io/mongodb/mongodb-kubernetes-tests-stg", + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-tests", "platforms": [ "linux/amd64" ] @@ -132,7 +142,7 @@ ] }, "staging": { - "repository": "quay.io/mongodb/mongodb-community-tests-stg", + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-community-tests", "platforms": [ "linux/amd64" ] @@ -146,7 +156,8 @@ ] }, "staging": { - "repository": "quay.io/mongodb/mongodb-kubernetes-readinessprobe-stg", + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-readinessprobe", "platforms": [ "linux/arm64", "linux/amd64" @@ -154,6 +165,7 @@ }, "release": { "version": "1.0.22", + "sign": true, "repository": "quay.io/mongodb/mongodb-kubernetes-readinessprobe", "platforms": [ "linux/arm64", @@ -169,7 +181,8 @@ ] }, "staging": { - "repository": "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook-stg", + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-operator-version-upgrade-post-start-hook", "platforms": [ "linux/arm64", "linux/amd64" @@ -177,6 +190,7 @@ }, "release": { "version": "1.0.9", + "sign": true, "repository": "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook", "platforms": [ "linux/arm64", @@ -194,6 +208,7 @@ ] }, "staging": { + "sign": true, "s3-store": "s3://kubectl-mongodb/staging", 
"platforms": [ "darwin/amd64", @@ -203,6 +218,7 @@ ] }, "release": { + "sign": true, "s3-store": "s3://kubectl-mongodb/prod", "platforms": [ "darwin/amd64", @@ -219,9 +235,11 @@ "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/helm-charts" }, "staging": { - "repository": "quay.io/mongodb/helm-charts-stg" + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/helm-charts" }, "release": { + "sign": true, "repository": "quay.io/mongodb/helm-charts" } } diff --git a/scripts/release/build/build_info.py b/scripts/release/build/build_info.py index cd6405967..742ed1413 100644 --- a/scripts/release/build/build_info.py +++ b/scripts/release/build/build_info.py @@ -7,32 +7,35 @@ class ImageInfo(dict): - def __init__(self, repository: str, platforms: list[str], version: str): + def __init__(self, repository: str, platforms: list[str], version: str, sign: bool): super().__init__() self.repository = repository self.platforms = platforms self.version = version + self.sign = sign def to_json(self): return {"repository": self.repository, "platforms": self.platforms, "version": self.version} class BinaryInfo(dict): - def __init__(self, s3_store: str, platforms: list[str], version: str): + def __init__(self, s3_store: str, platforms: list[str], version: str, sign: bool): super().__init__() self.s3_store = s3_store self.platforms = platforms self.version = version + self.sign = sign def to_json(self): return {"platforms": self.platforms, "version": self.version} class HelmChartInfo(dict): - def __init__(self, repository: str, version: str): + def __init__(self, repository: str, version: str, sign: bool): super().__init__() self.repository = repository self.version = version + self.sign = sign def to_json(self): return {"repository": self.repository, "version": self.version} @@ -40,7 +43,7 @@ def to_json(self): class BuildInfo(dict): def __init__( - self, images: Dict[str, ImageInfo], binaries: Dict[str, BinaryInfo], helm_charts: Dict[str, HelmChartInfo] + self, images: Dict[str, ImageInfo], binaries: Dict[str, BinaryInfo], helm_charts: Dict[str, HelmChartInfo] ): super().__init__() self.images = images @@ -100,7 +103,12 @@ def load_build_info(scenario: BuildScenario, if not image_version: image_version = version - images[name] = ImageInfo(repository=data["repository"], platforms=data["platforms"], version=image_version) + images[name] = ImageInfo( + repository=data["repository"], + platforms=data["platforms"], + version=image_version, + sign=data.get("sign", False), + ) binaries = {} for name, env_data in build_info["binaries"].items(): @@ -109,7 +117,12 @@ def load_build_info(scenario: BuildScenario, # If no data is available for the scenario, skip this binary continue - binaries[name] = BinaryInfo(s3_store=data["s3-store"], platforms=data["platforms"], version=version) + binaries[name] = BinaryInfo( + s3_store=data["s3-store"], + platforms=data["platforms"], + version=version, + sign=data.get("sign", False), + ) helm_charts = {} for name, env_data in build_info["helm-charts"].items(): @@ -118,6 +131,10 @@ def load_build_info(scenario: BuildScenario, # If no data is available for the scenario, skip this helm-chart continue - helm_charts[name] = HelmChartInfo(repository=data["repository"], version=version) + helm_charts[name] = HelmChartInfo( + repository=data["repository"], + version=version, + sign=data.get("sign", False), + ) return BuildInfo(images=images, binaries=binaries, helm_charts=helm_charts) diff --git a/scripts/release/build/build_info_test.py 
b/scripts/release/build/build_info_test.py index 9d33a909e..bb67fbf87 100644 --- a/scripts/release/build/build_info_test.py +++ b/scripts/release/build/build_info_test.py @@ -12,58 +12,79 @@ def test_load_build_info_patch(git_repo: Repo): - build_id = "688364423f9b6c00072b3556" - os.environ["BUILD_ID"] = build_id + patch_id = "688364423f9b6c00072b3556" + os.environ["version_id"] = patch_id expected_build_info = BuildInfo( images={ - "mongodbOperator": ImageInfo( + "operator": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes", platforms=["linux/amd64"], - version=build_id, + version=patch_id, + sign=False, ), - "initDatabase": ImageInfo( + "init-database": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-database", platforms=["linux/amd64"], - version=build_id, + version=patch_id, + sign=False, ), - "initAppDb": ImageInfo( + "init-appdb": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-appdb", platforms=["linux/amd64"], - version=build_id, + version=patch_id, + sign=False, ), - "initOpsManager": ImageInfo( + "init-ops-manager": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-ops-manager", platforms=["linux/amd64"], - version=build_id, + version=patch_id, + sign=False, ), "database": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-database", platforms=["linux/amd64"], - version=build_id, + version=patch_id, + sign=False, ), - "readinessprobe": ImageInfo( + "mco-tests": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-community-tests", + platforms=["linux/amd64"], + version=patch_id, + sign=False, + ), + "meko-tests": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-tests", + platforms=["linux/amd64"], + version=patch_id, + sign=False, + ), + "readiness-probe": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-readinessprobe", platforms=["linux/amd64"], - version=build_id, + version=patch_id, + sign=False, ), - "operator-version-upgrade-post-start-hook": ImageInfo( + "upgrade-hook": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-operator-version-upgrade-post-start-hook", platforms=["linux/amd64"], - version=build_id, + version=patch_id, + sign=False, ), }, binaries={ "kubectl-mongodb": BinaryInfo( s3_store="s3://kubectl-mongodb/dev", platforms=["linux/amd64"], - version=build_id, + version=patch_id, + sign=False, ) }, helm_charts={ "mongodb-kubernetes": HelmChartInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/helm-charts", - version=build_id, + version=patch_id, + sign=False, ) }, ) @@ -80,40 +101,59 @@ def test_load_build_info_staging(git_repo: Repo): expected_build_info = BuildInfo( images={ - "mongodbOperator": ImageInfo( - repository="quay.io/mongodb/mongodb-kubernetes-stg", + "operator": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes", platforms=["linux/arm64", "linux/amd64"], version=expecter_commit_sha, + sign=True, ), - "initDatabase": ImageInfo( - repository="quay.io/mongodb/mongodb-kubernetes-init-database-stg", + "init-database": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-database", platforms=["linux/arm64", "linux/amd64"], version=expecter_commit_sha, + sign=True, ), - 
"initAppDb": ImageInfo( - repository="quay.io/mongodb/mongodb-kubernetes-init-appdb-stg", + "init-appdb": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-appdb", platforms=["linux/arm64", "linux/amd64"], version=expecter_commit_sha, + sign=True, ), - "initOpsManager": ImageInfo( - repository="quay.io/mongodb/mongodb-kubernetes-init-ops-manager-stg", + "init-ops-manager": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-ops-manager", platforms=["linux/arm64", "linux/amd64"], version=expecter_commit_sha, + sign=True, ), "database": ImageInfo( - repository="quay.io/mongodb/mongodb-kubernetes-database-stg", + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-database", platforms=["linux/arm64", "linux/amd64"], version=expecter_commit_sha, + sign=True, + ), + "mco-tests": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-community-tests", + platforms=["linux/amd64"], + version=expecter_commit_sha, + sign=False, + ), + "meko-tests": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-tests", + platforms=["linux/amd64"], + version=expecter_commit_sha, + sign=False, ), - "readinessprobe": ImageInfo( - repository="quay.io/mongodb/mongodb-kubernetes-readinessprobe-stg", + "readiness-probe": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-readinessprobe", platforms=["linux/arm64", "linux/amd64"], version=expecter_commit_sha, + sign=True, ), - "operator-version-upgrade-post-start-hook": ImageInfo( - repository="quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook-stg", + "upgrade-hook": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-operator-version-upgrade-post-start-hook", platforms=["linux/arm64", "linux/amd64"], version=expecter_commit_sha, + sign=True, ), }, binaries={ @@ -121,12 +161,14 @@ def test_load_build_info_staging(git_repo: Repo): s3_store="s3://kubectl-mongodb/staging", platforms=["darwin/amd64", "darwin/arm64", "linux/amd64", "linux/arm64"], version=expecter_commit_sha, + sign=True, ) }, helm_charts={ "mongodb-kubernetes": HelmChartInfo( - repository="quay.io/mongodb/helm-charts-stg", + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/helm-charts", version=expecter_commit_sha, + sign=True, ) }, ) @@ -143,40 +185,47 @@ def test_load_build_info_release(git_repo: Repo, readinessprobe_version: str, expected_build_info = BuildInfo( images={ - "mongodbOperator": ImageInfo( + "operator": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes", platforms=["linux/arm64", "linux/amd64"], version=version, + sign=True, ), - "initDatabase": ImageInfo( + "init-database": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes-init-database", platforms=["linux/arm64", "linux/amd64"], version=version, + sign=True, ), - "initAppDb": ImageInfo( + "init-appdb": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes-init-appdb", platforms=["linux/arm64", "linux/amd64"], version=version, + sign=True, ), - "initOpsManager": ImageInfo( + "init-ops-manager": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes-init-ops-manager", platforms=["linux/arm64", "linux/amd64"], version=version, + sign=True, ), "database": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes-database", platforms=["linux/arm64", "linux/amd64"], version=version, + 
sign=True, ), - "readinessprobe": ImageInfo( + "readiness-probe": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes-readinessprobe", platforms=["linux/arm64", "linux/amd64"], version=readinessprobe_version, + sign=True, ), - "operator-version-upgrade-post-start-hook": ImageInfo( + "upgrade-hook": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook", platforms=["linux/arm64", "linux/amd64"], version=operator_version_upgrade_post_start_hook_version, + sign=True, ), }, binaries={ @@ -184,12 +233,14 @@ def test_load_build_info_release(git_repo: Repo, readinessprobe_version: str, s3_store="s3://kubectl-mongodb/prod", platforms=["darwin/amd64", "darwin/arm64", "linux/amd64", "linux/arm64"], version=version, + sign=True, ) }, helm_charts={ "mongodb-kubernetes": HelmChartInfo( repository="quay.io/mongodb/helm-charts", version=version, + sign=True, ) }, ) diff --git a/scripts/release/build/conftest.py b/scripts/release/build/conftest.py index ae820b2da..bdde0952c 100644 --- a/scripts/release/build/conftest.py +++ b/scripts/release/build/conftest.py @@ -9,18 +9,16 @@ def get_manually_upgradable_versions() -> Dict[str, str]: build_info = json.load(f) return { - "readinessprobe": build_info["images"]["readinessprobe"]["release"]["version"], - "operator_version_upgrade_post_start_hook": build_info["images"]["operator-version-upgrade-post-start-hook"][ - "release" - ]["version"], + "readiness-probe": build_info["images"]["readiness-probe"]["release"]["version"], + "upgrade-hook": build_info["images"]["upgrade-hook"]["release"]["version"], } @fixture(scope="module") def readinessprobe_version() -> str: - return get_manually_upgradable_versions()["readinessprobe"] + return get_manually_upgradable_versions()["readiness-probe"] @fixture(scope="module") def operator_version_upgrade_post_start_hook_version() -> str: - return get_manually_upgradable_versions()["operator_version_upgrade_post_start_hook"] + return get_manually_upgradable_versions()["upgrade-hook"] diff --git a/scripts/release/conftest.py b/scripts/release/conftest.py index 76410ba44..57199434e 100644 --- a/scripts/release/conftest.py +++ b/scripts/release/conftest.py @@ -1,8 +1,6 @@ -import json import os import shutil import tempfile -from typing import Dict from _pytest.fixtures import fixture from git import Repo @@ -169,9 +167,9 @@ def add_file(repo_path: str, src_file_path: str, dst_file_path: str | None = Non @fixture(scope="module") def readinessprobe_version() -> str: - return get_manually_upgradable_versions()["readinessprobe"] + return get_manually_upgradable_versions()["readiness-probe"] @fixture(scope="module") def operator_version_upgrade_post_start_hook_version() -> str: - return get_manually_upgradable_versions()["operator_version_upgrade_post_start_hook"] + return get_manually_upgradable_versions()["upgrade-hook"] diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index a4553813e..fb71c0acd 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -93,8 +93,7 @@ def image_build_config_from_args(args) -> ImageBuildConfiguration: version = args.version or image_build_info.version registry = args.registry or image_build_info.repository platforms = get_platforms_from_arg(args) or image_build_info.platforms - # TODO: add sign to build_info.json - sign = args.sign + sign = args.sign or image_build_info.sign # TODO: remove "all_agents" from context and environment variables support (not needed anymore) all_agents = 
args.all_agents or build_scenario.all_agents() From 5ab9c08276a0dcd337b0b895d36bc72a93d8ec5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Wed, 6 Aug 2025 15:44:32 +0200 Subject: [PATCH 096/164] Add agent and ops-manager to build_info.json --- build_info.json | 36 ++++++++++++++++++++++++ scripts/release/atomic_pipeline.py | 9 +++--- scripts/release/build/build_info_test.py | 24 ++++++++++++++++ scripts/release/pipeline_main.py | 8 +++--- 4 files changed, 69 insertions(+), 8 deletions(-) diff --git a/build_info.json b/build_info.json index f647424e0..093ea61be 100644 --- a/build_info.json +++ b/build_info.json @@ -197,6 +197,42 @@ "linux/amd64" ] } + }, + "agent": { + "patch": { + "version": "agent-version-from-release.json", + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent-ubi", + "platforms": [ + "linux/amd64" + ] + }, + "staging": { + "version": "agent-version-from-release.json", + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-agent-ubi", + "platforms": [ + "linux/arm64", + "linux/amd64" + ] + } + }, + "ops-manager": { + "patch": { + "version": "om-version-from-release.json", + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-enterprise-ops-manager", + "platforms": [ + "linux/amd64" + ] + }, + "staging": { + "version": "om-version-from-release.json", + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-enterprise-ops-manager", + "platforms": [ + "linux/arm64", + "linux/amd64" + ] + } } }, "binaries": { diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 479f04039..2a8206c37 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -233,6 +233,9 @@ def build_om_image(build_configuration: ImageBuildConfiguration): if om_version is None: raise ValueError("`om_version` should be defined.") + # Set the version in the build configuration (it is not provided in the build_configuration) + build_configuration.version = om_version + om_download_url = os.environ.get("om_download_url", "") if om_download_url == "": om_download_url = find_om_url(om_version) @@ -249,7 +252,7 @@ def build_om_image(build_configuration: ImageBuildConfiguration): ) -def build_init_appdb(build_configuration: ImageBuildConfiguration): +def build_init_appdb_image(build_configuration: ImageBuildConfiguration): release = load_release_file() base_url = "https://fastdl.mongodb.org/tools/db/" mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) @@ -263,7 +266,7 @@ def build_init_appdb(build_configuration: ImageBuildConfiguration): # TODO: nam static: remove this once static containers becomes the default -def build_init_database(build_configuration: ImageBuildConfiguration): +def build_init_database_image(build_configuration: ImageBuildConfiguration): release = load_release_file() base_url = "https://fastdl.mongodb.org/tools/db/" mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) @@ -329,12 +332,10 @@ def build_agent_pipeline( args = { "version": image_version, "agent_version": agent_version, - "ubi_suffix": "-ubi", "release_version": image_version, "init_database_image": init_database_image, "mongodb_tools_url_ubi": mongodb_tools_url_ubi, "mongodb_agent_url_ubi": mongodb_agent_url_ubi, - "quay_registry": build_configuration_copy.registry, } pipeline_process_image( diff --git a/scripts/release/build/build_info_test.py 
b/scripts/release/build/build_info_test.py index bb67fbf87..a7ba1b104 100644 --- a/scripts/release/build/build_info_test.py +++ b/scripts/release/build/build_info_test.py @@ -71,6 +71,18 @@ def test_load_build_info_patch(git_repo: Repo): version=patch_id, sign=False, ), + "agent": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent-ubi", + platforms=["linux/amd64"], + version="agent-version-from-release.json", + sign=False, + ), + "ops-manager": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-enterprise-ops-manager", + platforms=["linux/amd64"], + version="om-version-from-release.json", + sign=False, + ), }, binaries={ "kubectl-mongodb": BinaryInfo( @@ -155,6 +167,18 @@ def test_load_build_info_staging(git_repo: Repo): version=expecter_commit_sha, sign=True, ), + "agent": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-agent-ubi", + platforms=["linux/arm64", "linux/amd64"], + version="agent-version-from-release.json", + sign=True, + ), + "ops-manager": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-enterprise-ops-manager", + platforms=["linux/arm64", "linux/amd64"], + version="om-version-from-release.json", + sign=True, + ), }, binaries={ "kubectl-mongodb": BinaryInfo( diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index fb71c0acd..0d54682e7 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -18,8 +18,8 @@ from scripts.release.atomic_pipeline import ( build_agent_default_case, build_database_image, - build_init_appdb, - build_init_database, + build_init_appdb_image, + build_init_database_image, build_init_om_image, build_mco_tests_image, build_om_image, @@ -59,8 +59,8 @@ def get_builder_function_for_image_name() -> Dict[str, Callable]: "agent-pct": build_agent_on_agent_bump, "agent": build_agent_default_case, # Init images - "init-appdb": build_init_appdb, # working - "init-database": build_init_database, # working + "init-appdb": build_init_appdb_image, # working + "init-database": build_init_database_image, # working "init-ops-manager": build_init_om_image, # working # Ops Manager image "ops-manager": build_om_image, From e6d67ca0deaccdcc36f3a81c40005fa3c3b57000 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Wed, 6 Aug 2025 15:57:28 +0200 Subject: [PATCH 097/164] Fix issue with scenario --- scripts/release/pipeline_main.py | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index 0d54682e7..85f263adb 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -81,7 +81,7 @@ def build_image(image_name: str, build_configuration: ImageBuildConfiguration): def image_build_config_from_args(args) -> ImageBuildConfiguration: image = args.image - build_scenario = BuildScenario(args.scenario) or BuildScenario.infer_scenario_from_environment() + build_scenario = get_scenario_from_arg(args.scenario) or BuildScenario.infer_scenario_from_environment() build_info = load_build_info(build_scenario) image_build_info = build_info.images.get(image) @@ -92,7 +92,7 @@ def image_build_config_from_args(args) -> ImageBuildConfiguration: # TODO: cover versions for agents and OM images version = args.version or image_build_info.version registry = args.registry or image_build_info.repository - platforms = get_platforms_from_arg(args) or 
image_build_info.platforms + platforms = get_platforms_from_arg(args.platform) or image_build_info.platforms sign = args.sign or image_build_info.sign # TODO: remove "all_agents" from context and environment variables support (not needed anymore) all_agents = args.all_agents or build_scenario.all_agents() @@ -109,12 +109,22 @@ def image_build_config_from_args(args) -> ImageBuildConfiguration: ) -def get_platforms_from_arg(args): +def get_scenario_from_arg(args_scenario: str) -> BuildScenario | None: + if args_scenario: + try: + return BuildScenario(args_scenario) + except ValueError as e: + raise ValueError(f"Invalid scenario '{args_scenario}': {e}") + + return None + + +def get_platforms_from_arg(args_platforms: str) -> list[str] | None: """Parse and validate the --platform argument""" - platforms = [p.strip() for p in args.platform.split(",")] + platforms = [p.strip() for p in args_platforms.split(",")] if any(p not in SUPPORTED_PLATFORMS for p in platforms): raise ValueError( - f"Unsupported platform in --platforms '{args.platform}'. Supported platforms: {', '.join(SUPPORTED_PLATFORMS)}" + f"Unsupported platform in --platforms '{args_platforms}'. Supported platforms: {', '.join(SUPPORTED_PLATFORMS)}" ) return platforms From 7a340926dd9c1900fc458fb99fc9b8524820f3ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Thu, 7 Aug 2025 10:05:28 +0200 Subject: [PATCH 098/164] Fix for `build_info` computation --- scripts/release/build/build_info.py | 58 ++++++++++-------------- scripts/release/build/build_info_test.py | 6 +-- scripts/release/pipeline_main.py | 3 ++ scripts/release/release_info_test.py | 12 ++--- 4 files changed, 35 insertions(+), 44 deletions(-) diff --git a/scripts/release/build/build_info.py b/scripts/release/build/build_info.py index 742ed1413..fad5a6b14 100644 --- a/scripts/release/build/build_info.py +++ b/scripts/release/build/build_info.py @@ -1,4 +1,5 @@ import json +from dataclasses import dataclass from typing import Dict from scripts.release.build.build_scenario import BuildScenario @@ -6,56 +7,43 @@ get_initial_version, get_initial_commit_sha -class ImageInfo(dict): - def __init__(self, repository: str, platforms: list[str], version: str, sign: bool): - super().__init__() - self.repository = repository - self.platforms = platforms - self.version = version - self.sign = sign +@dataclass +class ImageInfo: + repository: str + platforms: list[str] + version: str + sign: bool def to_json(self): return {"repository": self.repository, "platforms": self.platforms, "version": self.version} -class BinaryInfo(dict): - def __init__(self, s3_store: str, platforms: list[str], version: str, sign: bool): - super().__init__() - self.s3_store = s3_store - self.platforms = platforms - self.version = version - self.sign = sign +@dataclass +class BinaryInfo: + s3_store: str + platforms: list[str] + version: str + sign: bool def to_json(self): return {"platforms": self.platforms, "version": self.version} -class HelmChartInfo(dict): - def __init__(self, repository: str, version: str, sign: bool): - super().__init__() - self.repository = repository - self.version = version - self.sign = sign +@dataclass +class HelmChartInfo: + repository: str + version: str + sign: bool def to_json(self): return {"repository": self.repository, "version": self.version} -class BuildInfo(dict): - def __init__( - self, images: Dict[str, ImageInfo], binaries: Dict[str, BinaryInfo], helm_charts: Dict[str, HelmChartInfo] - ): - super().__init__() - self.images = images - self.binaries = binaries 
- self.helm_charts = helm_charts - - def __dict__(self): - return { - "images": {name: images.__dict__ for name, images in self.images.items()}, - "binaries": {name: bin.__dict__ for name, bin in self.binaries.items()}, - "helm-charts": {name: chart.__dict__ for name, chart in self.helm_charts.items()}, - } +@dataclass +class BuildInfo: + images: Dict[str, ImageInfo] + binaries: Dict[str, BinaryInfo] + helm_charts: Dict[str, HelmChartInfo] def to_json(self): return { diff --git a/scripts/release/build/build_info_test.py b/scripts/release/build/build_info_test.py index a7ba1b104..bc9d2734a 100644 --- a/scripts/release/build/build_info_test.py +++ b/scripts/release/build/build_info_test.py @@ -103,7 +103,7 @@ def test_load_build_info_patch(git_repo: Repo): build_info = load_build_info(BuildScenario.PATCH, git_repo.working_dir) - assert build_info.__dict__() == expected_build_info.__dict__() + assert build_info == expected_build_info def test_load_build_info_staging(git_repo: Repo): @@ -199,7 +199,7 @@ def test_load_build_info_staging(git_repo: Repo): build_info = load_build_info(BuildScenario.STAGING, git_repo.working_dir) - assert build_info.__dict__() == expected_build_info.__dict__() + assert build_info == expected_build_info def test_load_build_info_release(git_repo: Repo, readinessprobe_version: str, @@ -271,4 +271,4 @@ def test_load_build_info_release(git_repo: Repo, readinessprobe_version: str, build_info = load_build_info(BuildScenario.RELEASE, git_repo.working_dir) - assert build_info.__dict__() == expected_build_info.__dict__() + assert build_info == expected_build_info diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index 85f263adb..b3ade357c 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -84,7 +84,10 @@ def image_build_config_from_args(args) -> ImageBuildConfiguration: build_scenario = get_scenario_from_arg(args.scenario) or BuildScenario.infer_scenario_from_environment() build_info = load_build_info(build_scenario) + logger.info(f"image is {image}") + logger.info(f"images are {build_info.images}") image_build_info = build_info.images.get(image) + logger.info(f"image_build_info is {image_build_info}") if not image_build_info: raise ValueError(f"Image '{image}' is not defined in the build info for scenario '{build_scenario}'") diff --git a/scripts/release/release_info_test.py b/scripts/release/release_info_test.py index 2f820037a..213f5d8e6 100644 --- a/scripts/release/release_info_test.py +++ b/scripts/release/release_info_test.py @@ -13,22 +13,22 @@ def test_create_release_info_json( expected_json = { "images": { - "mongodbOperator": { + "operator": { "repository": "quay.io/mongodb/mongodb-kubernetes", "platforms": ["linux/arm64", "linux/amd64"], "version": "1.2.0", }, - "initDatabase": { + "init-database": { "repository": "quay.io/mongodb/mongodb-kubernetes-init-database", "platforms": ["linux/arm64", "linux/amd64"], "version": "1.2.0", }, - "initAppDb": { + "init-appdb": { "repository": "quay.io/mongodb/mongodb-kubernetes-init-appdb", "platforms": ["linux/arm64", "linux/amd64"], "version": "1.2.0", }, - "initOpsManager": { + "init-ops-manager": { "repository": "quay.io/mongodb/mongodb-kubernetes-init-ops-manager", "platforms": ["linux/arm64", "linux/amd64"], "version": "1.2.0", @@ -38,12 +38,12 @@ def test_create_release_info_json( "platforms": ["linux/arm64", "linux/amd64"], "version": "1.2.0", }, - "readinessprobe": { + "readiness-probe": { "repository": 
"quay.io/mongodb/mongodb-kubernetes-readinessprobe", "platforms": ["linux/arm64", "linux/amd64"], "version": readinessprobe_version, }, - "operator-version-upgrade-post-start-hook": { + "upgrade-hook": { "repository": "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook", "platforms": ["linux/arm64", "linux/amd64"], "version": operator_version_upgrade_post_start_hook_version, From a71ccf6af4b84b6c496525178085c1885c033585 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Thu, 7 Aug 2025 10:22:05 +0200 Subject: [PATCH 099/164] Pipeline fixes --- scripts/release/pipeline_main.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index b3ade357c..7752e150b 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -92,7 +92,6 @@ def image_build_config_from_args(args) -> ImageBuildConfiguration: raise ValueError(f"Image '{image}' is not defined in the build info for scenario '{build_scenario}'") # Resolve final values with overrides - # TODO: cover versions for agents and OM images version = args.version or image_build_info.version registry = args.registry or image_build_info.repository platforms = get_platforms_from_arg(args.platform) or image_build_info.platforms @@ -113,17 +112,19 @@ def image_build_config_from_args(args) -> ImageBuildConfiguration: def get_scenario_from_arg(args_scenario: str) -> BuildScenario | None: - if args_scenario: - try: - return BuildScenario(args_scenario) - except ValueError as e: - raise ValueError(f"Invalid scenario '{args_scenario}': {e}") + if not args_scenario: + return None - return None + try: + return BuildScenario(args_scenario) + except ValueError as e: + raise ValueError(f"Invalid scenario '{args_scenario}': {e}") def get_platforms_from_arg(args_platforms: str) -> list[str] | None: - """Parse and validate the --platform argument""" + if not args_platforms: + return None + platforms = [p.strip() for p in args_platforms.split(",")] if any(p not in SUPPORTED_PLATFORMS for p in platforms): raise ValueError( From 3be7731527424f8d42efc8ec6cdc50d2c4a0eaff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Thu, 7 Aug 2025 10:27:19 +0200 Subject: [PATCH 100/164] Remove `all-agents` option --- scripts/release/atomic_pipeline.py | 2 +- scripts/release/build/build_scenario.py | 3 --- scripts/release/pipeline_main.py | 3 --- 3 files changed, 1 insertion(+), 7 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 2a8206c37..0ad030c17 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -354,7 +354,7 @@ def build_agent_default_case(build_configuration: ImageBuildConfiguration): release = load_release_file() # We need to release [all agents x latest operator] on operator releases - if build_configuration.all_agents: + if build_configuration.is_release_scenario(): agent_versions_to_build = gather_all_supported_agent_versions(release) # We only need [latest agents (for each OM major version and for CM) x patch ID] for patches else: diff --git a/scripts/release/build/build_scenario.py b/scripts/release/build/build_scenario.py index cc88ebdab..e5f0e0c22 100644 --- a/scripts/release/build/build_scenario.py +++ b/scripts/release/build/build_scenario.py @@ -56,6 +56,3 @@ def get_version(self, repository_path: str, changelog_sub_path: str, initial_com return calculate_next_version(repo, 
changelog_sub_path, initial_commit_sha, initial_version) raise ValueError(f"Unknown build scenario: {self}") - - def all_agents(self) -> bool: - return self == BuildScenario.RELEASE diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index 7752e150b..73bd14efa 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -96,8 +96,6 @@ def image_build_config_from_args(args) -> ImageBuildConfiguration: registry = args.registry or image_build_info.repository platforms = get_platforms_from_arg(args.platform) or image_build_info.platforms sign = args.sign or image_build_info.sign - # TODO: remove "all_agents" from context and environment variables support (not needed anymore) - all_agents = args.all_agents or build_scenario.all_agents() return ImageBuildConfiguration( scenario=build_scenario, @@ -106,7 +104,6 @@ def image_build_config_from_args(args) -> ImageBuildConfiguration: parallel=args.parallel, platforms=platforms, sign=sign, - all_agents=all_agents, parallel_factor=args.parallel_factor, ) From 1aae28b122bca44715b4155f16922bc83be85c13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Thu, 7 Aug 2025 10:42:58 +0200 Subject: [PATCH 101/164] Add missing `--sign` option --- scripts/release/pipeline_main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index 73bd14efa..0562e3f4e 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -166,7 +166,7 @@ def main(): parser = argparse.ArgumentParser(description="Build container images.") parser.add_argument("image", help="Image to build.") # Required parser.add_argument("--parallel", action="store_true", help="Build images in parallel.") - parser.add_argument("--debug", action="store_true", help="Enable debug logging.") + parser.add_argument("--sign", action="store_true", help="Sign images.") parser.add_argument( "--scenario", choices=list(BuildScenario), From e00b0e31e3d218fbf0b10ccc1290e4d26305a918 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Thu, 7 Aug 2025 11:06:54 +0200 Subject: [PATCH 102/164] Fix agent matrix build --- build_info.json | 2 - scripts/release/atomic_pipeline.py | 142 +++++++++++------------ scripts/release/build/build_info_test.py | 4 +- 3 files changed, 73 insertions(+), 75 deletions(-) diff --git a/build_info.json b/build_info.json index 093ea61be..effa51a4b 100644 --- a/build_info.json +++ b/build_info.json @@ -200,14 +200,12 @@ }, "agent": { "patch": { - "version": "agent-version-from-release.json", "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent-ubi", "platforms": [ "linux/amd64" ] }, "staging": { - "version": "agent-version-from-release.json", "sign": true, "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-agent-ubi", "platforms": [ diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 0ad030c17..b71f4b345 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -316,35 +316,6 @@ def build_upgrade_hook_image(build_configuration: ImageBuildConfiguration): ) -def build_agent_pipeline( - build_configuration: ImageBuildConfiguration, - image_version, - init_database_image, - mongodb_tools_url_ubi, - mongodb_agent_url_ubi: str, - agent_version, -): - build_configuration_copy = copy(build_configuration) - build_configuration_copy.version = image_version - print( - f"======== Building 
agent pipeline for version {image_version}, build configuration version: {build_configuration.version}" - ) - args = { - "version": image_version, - "agent_version": agent_version, - "release_version": image_version, - "init_database_image": init_database_image, - "mongodb_tools_url_ubi": mongodb_tools_url_ubi, - "mongodb_agent_url_ubi": mongodb_agent_url_ubi, - } - - pipeline_process_image( - dockerfile_path="docker/mongodb-agent/Dockerfile", - build_configuration=build_configuration_copy, - dockerfile_args=args, - ) - - def build_agent_default_case(build_configuration: ImageBuildConfiguration): """ Build the agent only for the latest operator for patches and operator releases. @@ -388,48 +359,6 @@ def build_agent_default_case(build_configuration: ImageBuildConfiguration): queue_exception_handling(tasks_queue) -def queue_exception_handling(tasks_queue): - exceptions_found = False - for task in tasks_queue.queue: - if task.exception() is not None: - exceptions_found = True - logger.fatal(f"The following exception has been found when building: {task.exception()}") - if exceptions_found: - raise Exception( - f"Exception(s) found when processing Agent images. \nSee also previous logs for more info\nFailing the build" - ) - - -def _build_agent_operator( - agent_version: Tuple[str, str], - build_configuration: ImageBuildConfiguration, - executor: ProcessPoolExecutor, - operator_version: str, - tasks_queue: Queue, -): - agent_distro = "rhel9_x86_64" - tools_version = agent_version[1] - tools_distro = get_tools_distro(tools_version)["amd"] - image_version = f"{agent_version[0]}_{operator_version}" - mongodb_tools_url_ubi = ( - f"https://downloads.mongodb.org/tools/db/mongodb-database-tools-{tools_distro}-{tools_version}.tgz" - ) - mongodb_agent_url_ubi = f"https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-{agent_version[0]}.{agent_distro}.tar.gz" - init_database_image = f"{build_configuration.registry}/mongodb-kubernetes-init-database:{operator_version}" - - tasks_queue.put( - executor.submit( - build_agent_pipeline, - build_configuration, - image_version, - init_database_image, - mongodb_tools_url_ubi, - mongodb_agent_url_ubi, - agent_version[0], - ) - ) - - def gather_all_supported_agent_versions(release: Dict) -> List[Tuple[str, str]]: # This is a list of a tuples - agent version and corresponding tools version agent_versions_to_build = list() @@ -489,3 +418,74 @@ def gather_latest_agent_versions(release: Dict) -> List[Tuple[str, str]]: agent_versions_to_build.append(("107.0.12.8669-1", "100.10.0")) return sorted(list(set(agent_versions_to_build))) + + +def _build_agent_operator( + agent_version: Tuple[str, str], + build_configuration: ImageBuildConfiguration, + executor: ProcessPoolExecutor, + operator_version: str, + tasks_queue: Queue, +): + agent_distro = "rhel9_x86_64" + tools_version = agent_version[1] + tools_distro = get_tools_distro(tools_version)["amd"] + image_version = f"{agent_version[0]}_{operator_version}" + mongodb_tools_url_ubi = ( + f"https://downloads.mongodb.org/tools/db/mongodb-database-tools-{tools_distro}-{tools_version}.tgz" + ) + mongodb_agent_url_ubi = f"https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-{agent_version[0]}.{agent_distro}.tar.gz" + init_database_image = f"{build_configuration.registry}/mongodb-kubernetes-init-database:{operator_version}" + + tasks_queue.put( + executor.submit( + 
build_agent_pipeline, + build_configuration, + image_version, + init_database_image, + mongodb_tools_url_ubi, + mongodb_agent_url_ubi, + agent_version[0], + ) + ) + + +def build_agent_pipeline( + build_configuration: ImageBuildConfiguration, + image_version, + init_database_image, + mongodb_tools_url_ubi, + mongodb_agent_url_ubi: str, + agent_version, +): + build_configuration_copy = copy(build_configuration) + build_configuration_copy.version = image_version + print( + f"======== Building agent pipeline for version {image_version}, build configuration version: {build_configuration.version}" + ) + args = { + "version": image_version, + "agent_version": agent_version, + "release_version": image_version, + "init_database_image": init_database_image, + "mongodb_tools_url_ubi": mongodb_tools_url_ubi, + "mongodb_agent_url_ubi": mongodb_agent_url_ubi, + } + + pipeline_process_image( + dockerfile_path="docker/mongodb-agent/Dockerfile", + build_configuration=build_configuration_copy, + dockerfile_args=args, + ) + + +def queue_exception_handling(tasks_queue): + exceptions_found = False + for task in tasks_queue.queue: + if task.exception() is not None: + exceptions_found = True + logger.fatal(f"The following exception has been found when building: {task.exception()}") + if exceptions_found: + raise Exception( + f"Exception(s) found when processing Agent images. \nSee also previous logs for more info\nFailing the build" + ) diff --git a/scripts/release/build/build_info_test.py b/scripts/release/build/build_info_test.py index bc9d2734a..8e1d2231f 100644 --- a/scripts/release/build/build_info_test.py +++ b/scripts/release/build/build_info_test.py @@ -74,7 +74,7 @@ def test_load_build_info_patch(git_repo: Repo): "agent": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent-ubi", platforms=["linux/amd64"], - version="agent-version-from-release.json", + version=patch_id, sign=False, ), "ops-manager": ImageInfo( @@ -170,7 +170,7 @@ def test_load_build_info_staging(git_repo: Repo): "agent": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-agent-ubi", platforms=["linux/arm64", "linux/amd64"], - version="agent-version-from-release.json", + version=expecter_commit_sha, sign=True, ), "ops-manager": ImageInfo( From f894e5bc5ff460263f745d4913a5429b51d9fec8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Thu, 7 Aug 2025 12:12:05 +0200 Subject: [PATCH 103/164] Disable concurrent builds --- scripts/release/atomic_pipeline.py | 5 +++-- scripts/release/build/image_build_configuration.py | 5 ++++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index b71f4b345..37e518c0c 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -338,7 +338,8 @@ def build_agent_default_case(build_configuration: ImageBuildConfiguration): tasks_queue = Queue() max_workers = 1 if build_configuration.parallel: - max_workers = None + # TODO: remove this once we have a proper synchronization for buildx builder concurrent creation + max_workers = 1 if build_configuration.parallel_factor > 0: max_workers = build_configuration.parallel_factor with ProcessPoolExecutor(max_workers=max_workers) as executor: @@ -435,7 +436,7 @@ def _build_agent_operator( f"https://downloads.mongodb.org/tools/db/mongodb-database-tools-{tools_distro}-{tools_version}.tgz" ) mongodb_agent_url_ubi = 
f"https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-{agent_version[0]}.{agent_distro}.tar.gz" - init_database_image = f"{build_configuration.registry}/mongodb-kubernetes-init-database:{operator_version}" + init_database_image = f"{build_configuration.base_registry()}/mongodb-kubernetes-init-database:{operator_version}" tasks_queue.put( executor.submit( diff --git a/scripts/release/build/image_build_configuration.py b/scripts/release/build/image_build_configuration.py index 750a69c76..7d3086d66 100644 --- a/scripts/release/build/image_build_configuration.py +++ b/scripts/release/build/image_build_configuration.py @@ -21,5 +21,8 @@ class ImageBuildConfiguration: def is_release_scenario(self) -> bool: return self.scenario == BuildScenario.RELEASE + def base_registry(self) -> str: + return self.registry.rpartition('/')[0] + def image_name(self) -> str: - return self.registry.split('/')[-1] + return self.registry.rpartition('/')[2] From b14022044cab675c340e807acd30a5b5e250ef4a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Thu, 7 Aug 2025 14:30:34 +0200 Subject: [PATCH 104/164] Move all env vars to constants.py --- docker/mongodb-community-tests/Dockerfile | 4 +--- scripts/release/atomic_pipeline.py | 26 ++--------------------- scripts/release/build/build_scenario.py | 11 +++++----- scripts/release/constants.py | 16 ++++++++++++++ scripts/release/pipeline_main.py | 16 +++----------- 5 files changed, 28 insertions(+), 45 deletions(-) diff --git a/docker/mongodb-community-tests/Dockerfile b/docker/mongodb-community-tests/Dockerfile index b568ff77f..0234b7e27 100644 --- a/docker/mongodb-community-tests/Dockerfile +++ b/docker/mongodb-community-tests/Dockerfile @@ -6,9 +6,7 @@ # # Ref: https://cryptography.io/en/latest/installation/#building-cryptography-on-linux # -ARG GOLANG_VERSION - -FROM public.ecr.aws/docker/library/golang:${GOLANG_VERSION} as builder +FROM public.ecr.aws/docker/library/golang:1.24 as builder ENV GO111MODULE=on ENV GOPATH "" diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 37e518c0c..f863a199c 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -122,16 +122,10 @@ def build_mco_tests_image(build_configuration: ImageBuildConfiguration): """ Builds image used to run community tests. """ - golang_version = os.getenv("GOLANG_VERSION", "1.24") - if golang_version == "": - raise Exception("Missing GOLANG_VERSION environment variable") - - buildargs = dict({"GOLANG_VERSION": golang_version}) pipeline_process_image( dockerfile_path="docker/mongodb-community-tests/Dockerfile", build_configuration=build_configuration, - dockerfile_args=buildargs, ) @@ -139,8 +133,8 @@ def build_operator_image(build_configuration: ImageBuildConfiguration): """Calculates arguments required to build the operator image, and starts the build process.""" # In evergreen, we can pass test_suffix env to publish the operator to a quay # repository with a given suffix. - test_suffix = os.environ.get("test_suffix", "") - log_automation_config_diff = os.environ.get("LOG_AUTOMATION_CONFIG_DIFF", "false") + test_suffix = os.getenv("test_suffix", "") + log_automation_config_diff = os.getenv("LOG_AUTOMATION_CONFIG_DIFF", "false") args = { "version": build_configuration.version, @@ -283,17 +277,9 @@ def build_readiness_probe_image(build_configuration: ImageBuildConfiguration): Builds image used for readiness probe. 
""" - golang_version = os.getenv("GOLANG_VERSION", "1.24") - - extra_args = { - "version": build_configuration.version, - "GOLANG_VERSION": golang_version, - } - pipeline_process_image( dockerfile_path="docker/mongodb-kubernetes-readinessprobe/Dockerfile", build_configuration=build_configuration, - dockerfile_args=extra_args, ) @@ -302,17 +288,9 @@ def build_upgrade_hook_image(build_configuration: ImageBuildConfiguration): Builds image used for version upgrade post-start hook. """ - golang_version = os.getenv("GOLANG_VERSION", "1.24") - - extra_args = { - "version": build_configuration.version, - "GOLANG_VERSION": golang_version, - } - pipeline_process_image( dockerfile_path="docker/mongodb-kubernetes-upgrade-hook/Dockerfile", build_configuration=build_configuration, - dockerfile_args=extra_args, ) diff --git a/scripts/release/build/build_scenario.py b/scripts/release/build/build_scenario.py index e5f0e0c22..f49ef74da 100644 --- a/scripts/release/build/build_scenario.py +++ b/scripts/release/build/build_scenario.py @@ -5,6 +5,7 @@ from git import Repo from scripts.release.version import calculate_next_version +from scripts.release.constants import triggered_by_git_tag, is_evg_patch, is_running_in_evg, get_version_id COMMIT_SHA_LENGTH = 8 @@ -18,10 +19,10 @@ class BuildScenario(StrEnum): @classmethod def infer_scenario_from_environment(cls) -> "BuildScenario": """Infer the build scenario from environment variables.""" - git_tag = os.getenv("triggered_by_git_tag") - is_patch = os.getenv("is_patch", "false").lower() == "true" - is_evg = os.getenv("RUNNING_IN_EVG", "false").lower() == "true" - patch_id = os.getenv("version_id") + git_tag = triggered_by_git_tag() + is_patch = is_evg_patch() + is_evg = is_running_in_evg() + patch_id = get_version_id() if git_tag: # Release scenario and the git tag will be used for promotion process only @@ -46,7 +47,7 @@ def get_version(self, repository_path: str, changelog_sub_path: str, initial_com match self: case BuildScenario.PATCH: - patch_id = os.getenv("version_id") + patch_id = get_version_id() if not patch_id: raise ValueError(f"version_id environment variable is not set for `{self}` build scenario") return patch_id diff --git a/scripts/release/constants.py b/scripts/release/constants.py index 694bba706..661e13222 100644 --- a/scripts/release/constants.py +++ b/scripts/release/constants.py @@ -14,3 +14,19 @@ def get_initial_version() -> str | None: def get_initial_commit_sha() -> str | None: return os.getenv(RELEASE_INITIAL_COMMIT_SHA_ENV_VAR) + + +def triggered_by_git_tag() -> str | None: + return os.getenv("triggered_by_git_tag") + + +def is_evg_patch() -> bool: + return os.getenv("is_patch", "false").lower() == "true" + + +def is_running_in_evg() -> bool: + return os.getenv("RUNNING_IN_EVG", "false").lower() == "true" + + +def get_version_id() -> str | None: + return os.getenv("version_id") diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index 0562e3f4e..7f35b0812 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -56,14 +56,13 @@ def get_builder_function_for_image_name() -> Dict[str, Callable]: "upgrade-hook": build_upgrade_hook_image, # working, but still using single arch build "operator-quick": build_operator_image_patch, # TODO: remove this image, it is not used anymore "database": build_database_image, # working - "agent-pct": build_agent_on_agent_bump, - "agent": build_agent_default_case, + "agent": build_agent_default_case, # working # Init images "init-appdb": 
build_init_appdb_image, # working "init-database": build_init_database_image, # working "init-ops-manager": build_init_om_image, # working # Ops Manager image - "ops-manager": build_om_image, + "ops-manager": build_om_image, # working } return image_builders @@ -185,16 +184,7 @@ def main(): "--registry", help="Override the base registry instead of resolving from build scenario", ) - parser.add_argument( - "--sign", action="store_true", help="Force signing instead of resolving condition from build scenario" - ) - - # Agent specific arguments - parser.add_argument( - "--all-agents", - action="store_true", - help="Build all agent variants instead of only the latest", - ) + # For agent builds parser.add_argument( "--parallel-factor", default=0, From 7b5a06463f8750190805c2734dbf6f9d45a2621d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Thu, 7 Aug 2025 14:42:43 +0200 Subject: [PATCH 105/164] Remove `operator-quick` image build process --- scripts/release/atomic_pipeline.py | 5 -- scripts/release/build/build_info_test.py | 3 +- scripts/release/build/build_scenario.py | 5 +- scripts/release/build/image_build_process.py | 2 +- scripts/release/optimized_operator_build.py | 87 -------------------- scripts/release/pipeline_main.py | 6 +- 6 files changed, 7 insertions(+), 101 deletions(-) delete mode 100644 scripts/release/optimized_operator_build.py diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index f863a199c..c9b085d2b 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -152,11 +152,6 @@ def build_operator_image(build_configuration: ImageBuildConfiguration): ) -def build_operator_image_patch(build_configuration: ImageBuildConfiguration): - if not build_operator_image_fast(build_configuration): - build_operator_image(build_configuration) - - def build_database_image(build_configuration: ImageBuildConfiguration): """ Builds a new database image. 
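[Editor's note] A minimal, standalone sketch — not part of any patch — of the registry-splitting behavior introduced by base_registry() and image_name() in PATCH 103/164 above. The repository URI is taken from the test fixtures; everything else is illustrative:

    # str.rpartition("/") splits on the last slash: index [0] is the base
    # registry and index [2] is the image name, mirroring the two helpers.
    repo = "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent-ubi"
    base, _, name = repo.rpartition("/")
    assert base == "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev"
    assert name == "mongodb-agent-ubi"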
diff --git a/scripts/release/build/build_info_test.py b/scripts/release/build/build_info_test.py index 8e1d2231f..0b844e1e8 100644 --- a/scripts/release/build/build_info_test.py +++ b/scripts/release/build/build_info_test.py @@ -1,5 +1,7 @@ import os +from git import Repo + from scripts.release.build.build_info import ( BinaryInfo, BuildInfo, @@ -7,7 +9,6 @@ ImageInfo, load_build_info, ) -from git import Repo from scripts.release.build.build_scenario import BuildScenario diff --git a/scripts/release/build/build_scenario.py b/scripts/release/build/build_scenario.py index f49ef74da..3d83288b7 100644 --- a/scripts/release/build/build_scenario.py +++ b/scripts/release/build/build_scenario.py @@ -1,11 +1,10 @@ -import os from enum import StrEnum -from lib.base_logger import logger from git import Repo -from scripts.release.version import calculate_next_version +from lib.base_logger import logger from scripts.release.constants import triggered_by_git_tag, is_evg_patch, is_running_in_evg, get_version_id +from scripts.release.version import calculate_next_version COMMIT_SHA_LENGTH = 8 diff --git a/scripts/release/build/image_build_process.py b/scripts/release/build/image_build_process.py index cf474ee3b..126cba258 100644 --- a/scripts/release/build/image_build_process.py +++ b/scripts/release/build/image_build_process.py @@ -3,11 +3,11 @@ from typing import Dict import boto3 +import docker import python_on_whales from botocore.exceptions import BotoCoreError, ClientError from python_on_whales.exceptions import DockerException -import docker from lib.base_logger import logger diff --git a/scripts/release/optimized_operator_build.py b/scripts/release/optimized_operator_build.py deleted file mode 100644 index 0c5a74b78..000000000 --- a/scripts/release/optimized_operator_build.py +++ /dev/null @@ -1,87 +0,0 @@ -import os -import subprocess -import tarfile -from datetime import datetime, timedelta - -import docker -from lib.base_logger import logger -from scripts.release.build.image_build_configuration import ImageBuildConfiguration - - -def copy_into_container(client, src, dst): - """Copies a local file into a running container.""" - - os.chdir(os.path.dirname(src)) - srcname = os.path.basename(src) - with tarfile.open(src + ".tar", mode="w") as tar: - tar.add(srcname) - - name, dst = dst.split(":") - container = client.containers.get(name) - - with open(src + ".tar", "rb") as fd: - container.put_archive(os.path.dirname(dst), fd.read()) - - -def build_operator_image_fast(build_configuration: ImageBuildConfiguration) -> bool: - """This function builds the operator locally and pushed into an existing - Docker image. This is the fastest way I could image we can do this.""" - - client = docker.from_env() - # image that we know is where we build operator. - image_repo = build_configuration.registry + "/" + build_configuration.image_type + "/mongodb-kubernetes" - image_tag = "latest" - repo_tag = image_repo + ":" + image_tag - - logger.debug(f"Pulling image: {repo_tag}") - try: - image = client.images.get(repo_tag) - except docker.errors.ImageNotFound: - logger.debug("Operator image does not exist locally. Building it now") - return False - - logger.debug("Done") - too_old = datetime.now() - timedelta(hours=3) - image_timestamp = datetime.fromtimestamp( - image.history()[0]["Created"] - ) # Layer 0 is the latest added layer to this Docker image. [-1] is the FROM layer. 
- - if image_timestamp < too_old: - logger.info("Current operator image is too old, will rebuild it completely first") - return False - - container_name = "mongodb-enterprise-operator" - operator_binary_location = "/usr/local/bin/mongodb-kubernetes-operator" - try: - client.containers.get(container_name).remove() - logger.debug(f"Removed {container_name}") - except docker.errors.NotFound: - pass - - container = client.containers.run(repo_tag, name=container_name, entrypoint="sh", detach=True) - - logger.debug("Building operator with debugging symbols") - subprocess.run(["make", "manager"], check=True, stdout=subprocess.PIPE) - logger.debug("Done building the operator") - - copy_into_container( - client, - os.getcwd() + "/docker/mongodb-kubernetes-operator/content/mongodb-kubernetes-operator", - container_name + ":" + operator_binary_location, - ) - - # Commit changes on disk as a tag - container.commit( - repository=image_repo, - tag=image_tag, - ) - # Stop this container so we can use it next time - container.stop() - container.remove() - - logger.info("Pushing operator to {}:{}".format(image_repo, image_tag)) - client.images.push( - repository=image_repo, - tag=image_tag, - ) - return True diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index 7f35b0812..489c5b77c 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -24,7 +24,6 @@ build_mco_tests_image, build_om_image, build_operator_image, - build_operator_image_patch, build_readiness_probe_image, build_tests_image, build_upgrade_hook_image, @@ -52,9 +51,8 @@ def get_builder_function_for_image_name() -> Dict[str, Callable]: "meko-tests": build_tests_image, # working "operator": build_operator_image, # working "mco-tests": build_mco_tests_image, # working - "readiness-probe": build_readiness_probe_image, # working, but still using single arch build - "upgrade-hook": build_upgrade_hook_image, # working, but still using single arch build - "operator-quick": build_operator_image_patch, # TODO: remove this image, it is not used anymore + "readiness-probe": build_readiness_probe_image, # working + "upgrade-hook": build_upgrade_hook_image, # working "database": build_database_image, # working "agent": build_agent_default_case, # working # Init images From 8df2ce469ae7ec55e2b32273b03d0f7157ec386e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Thu, 7 Aug 2025 14:46:57 +0200 Subject: [PATCH 106/164] Rebase fix --- scripts/release/build/image_build_configuration.py | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/release/build/image_build_configuration.py b/scripts/release/build/image_build_configuration.py index 7d3086d66..5eb497759 100644 --- a/scripts/release/build/image_build_configuration.py +++ b/scripts/release/build/image_build_configuration.py @@ -16,7 +16,6 @@ class ImageBuildConfiguration: parallel_factor: int = 0 platforms: Optional[List[str]] = None sign: bool = False - all_agents: bool = False def is_release_scenario(self) -> bool: return self.scenario == BuildScenario.RELEASE From b6b05aa2fe08ece0143b9f6435c1b003b804dfa0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Thu, 7 Aug 2025 15:12:33 +0200 Subject: [PATCH 107/164] Add release scenarios for agent and ops-manager --- build_info.json | 17 ++++++++++++++++- scripts/release/build/build_info.py | 15 ++++++++------- scripts/release/build/build_info_test.py | 14 +++++++++++++- scripts/release/release_info.py | 2 +- 4 files changed, 38 insertions(+), 10 deletions(-) 
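[Editor's note] Before the diffs below: a hedged sketch of how one of the per-scenario entries added to build_info.json here might be looked up. The helper name and the top-level "images" key are assumptions (the real resolution lives in load_build_info); the repository value matches the diff that follows:

    import json

    # Hypothetical helper, for illustration only.
    def image_block(path: str, image: str, scenario: str) -> dict:
        with open(path) as fh:
            data = json.load(fh)
        # Assumes build_info.json nests entries under an "images" key.
        return data["images"][image][scenario]

    # image_block("build_info.json", "agent", "release")["repository"]
    # would then yield "quay.io/mongodb/mongodb-agent-ubi".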
diff --git a/build_info.json b/build_info.json index effa51a4b..c84c3bfd6 100644 --- a/build_info.json +++ b/build_info.json @@ -212,6 +212,14 @@ "linux/arm64", "linux/amd64" ] + }, + "release": { + "sign": true, + "repository": "quay.io/mongodb/mongodb-agent-ubi", + "platforms": [ + "linux/arm64", + "linux/amd64" + ] } }, "ops-manager": { @@ -227,7 +235,14 @@ "sign": true, "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-enterprise-ops-manager", "platforms": [ - "linux/arm64", + "linux/amd64" + ] + }, + "release": { + "version": "om-version-from-release.json", + "sign": true, + "repository": "quay.io/mongodb/mongodb-enterprise-ops-manager", + "platforms": [ "linux/amd64" ] } diff --git a/scripts/release/build/build_info.py b/scripts/release/build/build_info.py index fad5a6b14..b937889a3 100644 --- a/scripts/release/build/build_info.py +++ b/scripts/release/build/build_info.py @@ -14,7 +14,7 @@ class ImageInfo: version: str sign: bool - def to_json(self): + def to_release_info_json(self): return {"repository": self.repository, "platforms": self.platforms, "version": self.version} @@ -25,7 +25,7 @@ class BinaryInfo: version: str sign: bool - def to_json(self): + def to_release_info_json(self): return {"platforms": self.platforms, "version": self.version} @@ -35,7 +35,7 @@ class HelmChartInfo: version: str sign: bool - def to_json(self): + def to_release_info_json(self): return {"repository": self.repository, "version": self.version} @@ -45,11 +45,12 @@ class BuildInfo: binaries: Dict[str, BinaryInfo] helm_charts: Dict[str, HelmChartInfo] - def to_json(self): + def to_release_info_json(self): return { - "images": {name: images.to_json() for name, images in self.images.items()}, - "binaries": {name: bin.to_json() for name, bin in self.binaries.items()}, - "helm-charts": {name: chart.to_json() for name, chart in self.helm_charts.items()}, + "images": {name: images.to_release_info_json() for name, images in self.images.items() if + name not in ["agent", "ops-manager"]}, + "binaries": {name: bin.to_release_info_json() for name, bin in self.binaries.items()}, + "helm-charts": {name: chart.to_release_info_json() for name, chart in self.helm_charts.items()}, } diff --git a/scripts/release/build/build_info_test.py b/scripts/release/build/build_info_test.py index 0b844e1e8..20f563981 100644 --- a/scripts/release/build/build_info_test.py +++ b/scripts/release/build/build_info_test.py @@ -176,7 +176,7 @@ def test_load_build_info_staging(git_repo: Repo): ), "ops-manager": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-enterprise-ops-manager", - platforms=["linux/arm64", "linux/amd64"], + platforms=["linux/amd64"], version="om-version-from-release.json", sign=True, ), @@ -252,6 +252,18 @@ def test_load_build_info_release(git_repo: Repo, readinessprobe_version: str, version=operator_version_upgrade_post_start_hook_version, sign=True, ), + "agent": ImageInfo( + repository="quay.io/mongodb/mongodb-agent-ubi", + platforms=["linux/arm64", "linux/amd64"], + version=version, + sign=True, + ), + "ops-manager": ImageInfo( + repository="quay.io/mongodb/mongodb-enterprise-ops-manager", + platforms=["linux/amd64"], + version="om-version-from-release.json", + sign=True, + ), }, binaries={ "kubectl-mongodb": BinaryInfo( diff --git a/scripts/release/release_info.py b/scripts/release/release_info.py index 40fc7f3bc..dfdef01cc 100644 --- a/scripts/release/release_info.py +++ b/scripts/release/release_info.py @@ -22,7 +22,7 @@ def create_release_info_json( 
initial_version=initial_version, ) - return json.dumps(build_info.to_json(), indent=2) + return json.dumps(build_info.to_release_info_json(), indent=2) if __name__ == "__main__": From c5ad3c59ba807ae445f2827454c072a5e88dfb98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Fri, 8 Aug 2025 09:36:24 +0200 Subject: [PATCH 108/164] merge from Julien branch --- pipeline.py | 4 +- pipeline_test.py | 2 +- scripts/release/atomic_pipeline.py | 138 +++++++++--------- scripts/release/build/image_build_process.py | 29 +--- .../build/image_signing.py} | 4 +- 5 files changed, 78 insertions(+), 99 deletions(-) rename scripts/{evergreen/release/images_signing.py => release/build/image_signing.py} (99%) diff --git a/pipeline.py b/pipeline.py index ee48ed919..e5955205e 100755 --- a/pipeline.py +++ b/pipeline.py @@ -45,12 +45,12 @@ get_supported_operator_versions, get_supported_version_for_image_matrix_handling, ) -from scripts.evergreen.release.images_signing import ( +from scripts.evergreen.release.sbom import generate_sbom, generate_sbom_for_cli +from scripts.release.build.image_signing import ( mongodb_artifactory_login, sign_image, verify_signature, ) -from scripts.evergreen.release.sbom import generate_sbom, generate_sbom_for_cli TRACER = trace.get_tracer("evergreen-agent") diff --git a/pipeline_test.py b/pipeline_test.py index 68b7e3a8e..f0ae96649 100644 --- a/pipeline_test.py +++ b/pipeline_test.py @@ -14,7 +14,7 @@ is_version_in_range, operator_build_configuration, ) -from scripts.evergreen.release.images_signing import run_command_with_retries +from scripts.release.build.image_signing import run_command_with_retries release_json = { "supportedImages": { diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index c9b085d2b..1f9919397 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -16,70 +16,57 @@ from packaging.version import Version from lib.base_logger import logger -from scripts.evergreen.release.images_signing import ( +from scripts.release.build.image_build_configuration import ImageBuildConfiguration +from scripts.release.build.image_build_process import execute_docker_build +from scripts.release.build.image_signing import ( sign_image, verify_signature, ) -from scripts.release.build.image_build_configuration import ImageBuildConfiguration -from scripts.release.build.image_build_process import build_image - -from .optimized_operator_build import build_operator_image_fast TRACER = trace.get_tracer("evergreen-agent") -def get_tools_distro(tools_version: str) -> Dict[str, str]: - new_rhel_tool_version = "100.10.0" - default_distro = {"arm": "rhel90-aarch64", "amd": "rhel90-x86_64"} - if Version(tools_version) >= Version(new_rhel_tool_version): - return {"arm": "rhel93-aarch64", "amd": "rhel93-x86_64"} - return default_distro - - -def load_release_file() -> Dict: - with open("release.json") as release: - return json.load(release) - - -@TRACER.start_as_current_span("sonar_build_image") -def pipeline_process_image( +@TRACER.start_as_current_span("build_image_generic") +def build_image( dockerfile_path: str, build_configuration: ImageBuildConfiguration, - dockerfile_args: Dict[str, str] = None, + build_args: Dict[str, str] = None, build_path: str = ".", ): - """Builds a Docker image with arguments defined in `args`.""" + """ + Build an image then (optionally) sign the result. 
+    """
     image_name = build_configuration.image_name()
 
     span = trace.get_current_span()
     span.set_attribute("mck.image_name", image_name)
-    if dockerfile_args:
-        span.set_attribute("mck.build_args", str(dockerfile_args))
-    if not dockerfile_args:
-        dockerfile_args = {}
-    logger.info(f"Dockerfile args: {dockerfile_args}, for image: {image_name}")
+    registry = build_configuration.base_registry()
+    build_args = build_args or {}
 
-    build_image(
-        image_tag=build_configuration.version,
-        dockerfile_path=dockerfile_path,
-        dockerfile_args=dockerfile_args,
-        registry=build_configuration.registry,
+    if build_args:
+        span.set_attribute("mck.build_args", str(build_args))
+
+    logger.info(f"Building {image_name}, dockerfile args: {build_args}")
+    logger.debug(f"Dockerfile: {dockerfile_path}, build path: {build_path}")
+    logger.debug(f"Building {image_name} for platforms={build_configuration.platforms}")
+    logger.debug(f"build_image - base registry={registry}")
+
+    # Assemble the full image URI and run the buildx build
+    image_full_uri = f"{build_configuration.registry}:{build_configuration.version}"
+
+    execute_docker_build(
+        tag=image_full_uri,
+        dockerfile=dockerfile_path,
+        path=build_path,
+        args=build_args,
+        push=True,
         platforms=build_configuration.platforms,
-        build_path=build_path,
     )
 
     if build_configuration.sign:
-        pipeline_sign_image(
-            registry=build_configuration.registry,
-            version=build_configuration.version,
-        )
-
-
-@TRACER.start_as_current_span("sign_image_in_repositories")
-def pipeline_sign_image(registry: str, version: str):
-    logger.info("Signing image")
-    sign_image(registry, version)
-    verify_signature(registry, version)
+        logger.info("Signing image")
+        sign_image(build_configuration.registry, build_configuration.version)
+        verify_signature(build_configuration.registry, build_configuration.version)
 
 
 def build_tests_image(build_configuration: ImageBuildConfiguration):
@@ -110,10 +97,10 @@ def build_tests_image(build_configuration: ImageBuildConfiguration):
 
     build_args = dict({"PYTHON_VERSION": python_version})
 
-    pipeline_process_image(
+    build_image(
         dockerfile_path="docker/mongodb-kubernetes-tests/Dockerfile",
         build_configuration=build_configuration,
-        dockerfile_args=build_args,
+        build_args=build_args,
         build_path="docker/mongodb-kubernetes-tests",
     )
 
@@ -123,7 +110,7 @@ def build_mco_tests_image(build_configuration: ImageBuildConfiguration):
     Builds image used to run community tests.
""" - pipeline_process_image( + build_image( dockerfile_path="docker/mongodb-community-tests/Dockerfile", build_configuration=build_configuration, ) @@ -144,11 +131,10 @@ def build_operator_image(build_configuration: ImageBuildConfiguration): logger.info(f"Building Operator args: {args}") - image_name = "mongodb-kubernetes" - pipeline_process_image( + build_image( dockerfile_path="docker/mongodb-kubernetes-operator/Dockerfile", build_configuration=build_configuration, - dockerfile_args=args, + build_args=args, ) @@ -158,10 +144,10 @@ def build_database_image(build_configuration: ImageBuildConfiguration): """ args = {"version": build_configuration.version} - pipeline_process_image( + build_image( dockerfile_path="docker/mongodb-kubernetes-database/Dockerfile", build_configuration=build_configuration, - dockerfile_args=args, + build_args=args, ) @@ -182,7 +168,7 @@ def find_om_in_releases(om_version: str, releases: Dict[str, str]) -> Optional[s def get_om_releases() -> Dict[str, str]: - """Returns a dictionary representation of the Json document holdin all the OM + """Returns a dictionary representation of the Json document holding all the OM releases. """ ops_manager_release_archive = ( @@ -208,10 +194,11 @@ def find_om_url(om_version: str) -> str: def build_init_om_image(build_configuration: ImageBuildConfiguration): args = {"version": build_configuration.version} - pipeline_process_image( + + build_image( dockerfile_path="docker/mongodb-kubernetes-init-ops-manager/Dockerfile", build_configuration=build_configuration, - dockerfile_args=args, + build_args=args, ) @@ -234,10 +221,10 @@ def build_om_image(build_configuration: ImageBuildConfiguration): "om_download_url": om_download_url, } - pipeline_process_image( + build_image( dockerfile_path="docker/mongodb-enterprise-ops-manager/Dockerfile", build_configuration=build_configuration, - dockerfile_args=args, + build_args=args, ) @@ -247,10 +234,10 @@ def build_init_appdb_image(build_configuration: ImageBuildConfiguration): mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) args = {"version": build_configuration.version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} - pipeline_process_image( + build_image( dockerfile_path="docker/mongodb-kubernetes-init-appdb/Dockerfile", build_configuration=build_configuration, - dockerfile_args=args, + build_args=args, ) @@ -260,10 +247,11 @@ def build_init_database_image(build_configuration: ImageBuildConfiguration): base_url = "https://fastdl.mongodb.org/tools/db/" mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) args = {"version": build_configuration.version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} - pipeline_process_image( + + build_image( "docker/mongodb-kubernetes-init-database/Dockerfile", build_configuration=build_configuration, - dockerfile_args=args, + build_args=args, ) @@ -272,7 +260,7 @@ def build_readiness_probe_image(build_configuration: ImageBuildConfiguration): Builds image used for readiness probe. """ - pipeline_process_image( + build_image( dockerfile_path="docker/mongodb-kubernetes-readinessprobe/Dockerfile", build_configuration=build_configuration, ) @@ -283,7 +271,7 @@ def build_upgrade_hook_image(build_configuration: ImageBuildConfiguration): Builds image used for version upgrade post-start hook. 
""" - pipeline_process_image( + build_image( dockerfile_path="docker/mongodb-kubernetes-upgrade-hook/Dockerfile", build_configuration=build_configuration, ) @@ -293,7 +281,6 @@ def build_agent_default_case(build_configuration: ImageBuildConfiguration): """ Build the agent only for the latest operator for patches and operator releases. - See more information in the function: build_agent_on_agent_bump """ release = load_release_file() @@ -316,12 +303,12 @@ def build_agent_default_case(build_configuration: ImageBuildConfiguration): if build_configuration.parallel_factor > 0: max_workers = build_configuration.parallel_factor with ProcessPoolExecutor(max_workers=max_workers) as executor: - logger.info(f"running with factor of {max_workers}") - print(f"======= Versions to build {agent_versions_to_build} =======") + logger.info(f"Running with factor of {max_workers}") + logger.info(f"======= Agent versions to build {agent_versions_to_build} =======") for idx, agent_version in enumerate(agent_versions_to_build): # We don't need to keep create and push the same image on every build. # It is enough to create and push the non-operator suffixed images only during releases to ecr and quay. - print(f"======= Building Agent {agent_version} ({idx}/{len(agent_versions_to_build)})") + logger.info(f"======= Building Agent {agent_version} ({idx}/{len(agent_versions_to_build)})") _build_agent_operator( agent_version, build_configuration, @@ -446,10 +433,10 @@ def build_agent_pipeline( "mongodb_agent_url_ubi": mongodb_agent_url_ubi, } - pipeline_process_image( + build_image( dockerfile_path="docker/mongodb-agent/Dockerfile", build_configuration=build_configuration_copy, - dockerfile_args=args, + build_args=args, ) @@ -463,3 +450,16 @@ def queue_exception_handling(tasks_queue): raise Exception( f"Exception(s) found when processing Agent images. 
\nSee also previous logs for more info\nFailing the build" ) + + +def get_tools_distro(tools_version: str) -> Dict[str, str]: + new_rhel_tool_version = "100.10.0" + default_distro = {"arm": "rhel90-aarch64", "amd": "rhel90-x86_64"} + if Version(tools_version) >= Version(new_rhel_tool_version): + return {"arm": "rhel93-aarch64", "amd": "rhel93-x86_64"} + return default_distro + + +def load_release_file() -> Dict: + with open("release.json") as release: + return json.load(release) diff --git a/scripts/release/build/image_build_process.py b/scripts/release/build/image_build_process.py index 126cba258..750cc217b 100644 --- a/scripts/release/build/image_build_process.py +++ b/scripts/release/build/image_build_process.py @@ -69,7 +69,7 @@ def ensure_buildx_builder(builder_name: str = "multiarch") -> str: return builder_name -def docker_build_image( +def execute_docker_build( tag: str, dockerfile: str, path: str, args: Dict[str, str], push: bool, platforms: list[str] ): """ @@ -82,6 +82,9 @@ def docker_build_image( :param push: Whether to push the image after building :param platforms: List of target platforms (e.g., ["linux/amd64", "linux/arm64"]) """ + # Login to ECR before building + ecr_login_boto3(region="us-east-1", account_id="268558157000") + docker_cmd = python_on_whales.docker try: @@ -120,27 +123,3 @@ def docker_build_image( except Exception as e: logger.error(f"Failed to build image {tag}: {e}") raise RuntimeError(f"Failed to build image {tag}: {str(e)}") - - -def build_image( - image_tag: str, - dockerfile_path: str, - dockerfile_args: Dict[str, str], - registry: str, - platforms: list[str], - build_path: str, -): - # Login to ECR - ecr_login_boto3(region="us-east-1", account_id="268558157000") - - image_full_uri = f"{registry}:{image_tag}" - - # Build image with docker buildx - docker_build_image( - tag=image_full_uri, - dockerfile=dockerfile_path, - path=build_path, - args=dockerfile_args, - push=True, - platforms=platforms, - ) diff --git a/scripts/evergreen/release/images_signing.py b/scripts/release/build/image_signing.py similarity index 99% rename from scripts/evergreen/release/images_signing.py rename to scripts/release/build/image_signing.py index 9a5b50288..d50116bb8 100644 --- a/scripts/evergreen/release/images_signing.py +++ b/scripts/release/build/image_signing.py @@ -215,7 +215,7 @@ def sign_image(repository: str, tag: str) -> None: @TRACER.start_as_current_span("verify_signature") -def verify_signature(repository: str, tag: str) -> bool: +def verify_signature(repository: str, tag: str): start_time = time.time() span = trace.get_current_span() @@ -231,7 +231,7 @@ def verify_signature(repository: str, tag: str) -> bool: kubernetes_operator_public_key = r.text else: logger.error(f"Failed to retrieve the public key from {public_key_url}: Status code {r.status_code}") - return False + return public_key_var_name = "OPERATOR_PUBLIC_KEY" additional_args = [ From 8183ec547b2485dcaf61e0311d5aaa8c47c998f1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Fri, 8 Aug 2025 10:33:20 +0200 Subject: [PATCH 109/164] Fix release_info.py structure --- scripts/release/build/build_info.py | 29 ++++++--------- scripts/release/pipeline_main.py | 37 +++++++++++++------ scripts/release/release_info.py | 57 ++++++++++++++++++++++++++++- 3 files changed, 92 insertions(+), 31 deletions(-) diff --git a/scripts/release/build/build_info.py b/scripts/release/build/build_info.py index b937889a3..a12a97ea8 100644 --- a/scripts/release/build/build_info.py +++ 
b/scripts/release/build/build_info.py @@ -6,6 +6,18 @@ from scripts.release.constants import DEFAULT_REPOSITORY_PATH, DEFAULT_CHANGELOG_PATH, RELEASE_INITIAL_VERSION_ENV_VAR, \ get_initial_version, get_initial_commit_sha +MEKO_TESTS_IMAGE = "meko-tests" +OPERATOR_IMAGE = "operator" +MCO_TESTS_IMAGE = "mco-tests" +READINESS_PROBE_IMAGE = "readiness-probe" +UPGRADE_HOOK_IMAGE = "upgrade-hook" +DATABASE_IMAGE = "database" +AGENT_IMAGE = "agent" +INIT_APPDB_IMAGE = "init-appdb" +INIT_DATABASE_IMAGE = "init-database" +INIT_OPS_MANAGER_IMAGE = "init-ops-manager" +OPS_MANAGER_IMAGE = "ops-manager" + @dataclass class ImageInfo: @@ -14,9 +26,6 @@ class ImageInfo: version: str sign: bool - def to_release_info_json(self): - return {"repository": self.repository, "platforms": self.platforms, "version": self.version} - @dataclass class BinaryInfo: @@ -25,9 +34,6 @@ class BinaryInfo: version: str sign: bool - def to_release_info_json(self): - return {"platforms": self.platforms, "version": self.version} - @dataclass class HelmChartInfo: @@ -35,9 +41,6 @@ class HelmChartInfo: version: str sign: bool - def to_release_info_json(self): - return {"repository": self.repository, "version": self.version} - @dataclass class BuildInfo: @@ -45,14 +48,6 @@ class BuildInfo: binaries: Dict[str, BinaryInfo] helm_charts: Dict[str, HelmChartInfo] - def to_release_info_json(self): - return { - "images": {name: images.to_release_info_json() for name, images in self.images.items() if - name not in ["agent", "ops-manager"]}, - "binaries": {name: bin.to_release_info_json() for name, bin in self.binaries.items()}, - "helm-charts": {name: chart.to_release_info_json() for name, chart in self.helm_charts.items()}, - } - def load_build_info(scenario: BuildScenario, repository_path: str = DEFAULT_REPOSITORY_PATH, diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index 489c5b77c..803655339 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -28,7 +28,20 @@ build_tests_image, build_upgrade_hook_image, ) -from scripts.release.build.build_info import load_build_info +from scripts.release.build.build_info import ( + AGENT_IMAGE, + DATABASE_IMAGE, + INIT_APPDB_IMAGE, + INIT_DATABASE_IMAGE, + INIT_OPS_MANAGER_IMAGE, + MCO_TESTS_IMAGE, + MEKO_TESTS_IMAGE, + OPERATOR_IMAGE, + OPS_MANAGER_IMAGE, + READINESS_PROBE_IMAGE, + UPGRADE_HOOK_IMAGE, + load_build_info, +) from scripts.release.build.build_scenario import ( BuildScenario, ) @@ -48,19 +61,19 @@ def get_builder_function_for_image_name() -> Dict[str, Callable]: """Returns a dictionary of image names that can be built.""" image_builders = { - "meko-tests": build_tests_image, # working - "operator": build_operator_image, # working - "mco-tests": build_mco_tests_image, # working - "readiness-probe": build_readiness_probe_image, # working - "upgrade-hook": build_upgrade_hook_image, # working - "database": build_database_image, # working - "agent": build_agent_default_case, # working + MEKO_TESTS_IMAGE: build_tests_image, # working + OPERATOR_IMAGE: build_operator_image, # working + MCO_TESTS_IMAGE: build_mco_tests_image, # working + READINESS_PROBE_IMAGE: build_readiness_probe_image, # working + UPGRADE_HOOK_IMAGE: build_upgrade_hook_image, # working + DATABASE_IMAGE: build_database_image, # working + AGENT_IMAGE: build_agent_default_case, # working # Init images - "init-appdb": build_init_appdb_image, # working - "init-database": build_init_database_image, # working - "init-ops-manager": build_init_om_image, # working + 
INIT_APPDB_IMAGE: build_init_appdb_image, # working + INIT_DATABASE_IMAGE: build_init_database_image, # working + INIT_OPS_MANAGER_IMAGE: build_init_om_image, # working # Ops Manager image - "ops-manager": build_om_image, # working + OPS_MANAGER_IMAGE: build_om_image, # working } return image_builders diff --git a/scripts/release/release_info.py b/scripts/release/release_info.py index dfdef01cc..201f4cec9 100644 --- a/scripts/release/release_info.py +++ b/scripts/release/release_info.py @@ -2,7 +2,17 @@ import json import pathlib -from scripts.release.build.build_info import load_build_info +from scripts.release.build.build_info import ( + DATABASE_IMAGE, + INIT_APPDB_IMAGE, + INIT_DATABASE_IMAGE, + INIT_OPS_MANAGER_IMAGE, + OPERATOR_IMAGE, + READINESS_PROBE_IMAGE, + UPGRADE_HOOK_IMAGE, + BuildInfo, + load_build_info, +) from scripts.release.build.build_scenario import BuildScenario from scripts.release.constants import ( DEFAULT_CHANGELOG_PATH, @@ -10,6 +20,16 @@ DEFAULT_REPOSITORY_PATH, ) +RELEASE_INFO_IMAGES_ORDERED = [ + OPERATOR_IMAGE, + INIT_DATABASE_IMAGE, + INIT_APPDB_IMAGE, + INIT_OPS_MANAGER_IMAGE, + DATABASE_IMAGE, + READINESS_PROBE_IMAGE, + UPGRADE_HOOK_IMAGE, +] + def create_release_info_json( repository_path: str, changelog_sub_path: str, initial_commit_sha: str = None, initial_version: str = None @@ -22,7 +42,40 @@ def create_release_info_json( initial_version=initial_version, ) - return json.dumps(build_info.to_release_info_json(), indent=2) + release_info_json = convert_to_release_info_json(build_info) + + return json.dumps(release_info_json, indent=2) + + +def convert_to_release_info_json(build_info: BuildInfo) -> dict: + output = { + "images": {}, + "binaries": {}, + "helm-charts": {}, + } + # Filter (and order) images to include only those relevant for release info + images = {name: build_info.images[name] for name in RELEASE_INFO_IMAGES_ORDERED} + + for name, image in images.items(): + output["images"][name] = { + "repository": image.repository, + "platforms": image.platforms, + "version": image.version, + } + + for name, binary in build_info.binaries.items(): + output["binaries"][name] = { + "platforms": binary.platforms, + "version": binary.version, + } + + for name, chart in build_info.helm_charts.items(): + output["helm-charts"][name] = { + "repository": chart.repository, + "version": chart.version, + } + + return output if __name__ == "__main__": From 2ec75879a950aa6ea579792aaa088ba65e48b7fc Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Fri, 8 Aug 2025 11:15:01 +0200 Subject: [PATCH 110/164] Explicitly push to ECR with latest tag. 
Staging as a followup --- scripts/release/build_context.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/scripts/release/build_context.py b/scripts/release/build_context.py index 143693f46..ee9f6f0ed 100644 --- a/scripts/release/build_context.py +++ b/scripts/release/build_context.py @@ -69,6 +69,9 @@ def get_version(self) -> str: """Gets the version that will be used to tag the images.""" if self.scenario == BuildScenario.RELEASE: return self.git_tag + if self.scenario == BuildScenario.STAGING: + # On master merges, always use "latest" (preserving legacy behavior) + return "latest" if self.patch_id: return self.patch_id # Alternatively, we can fail here if no ID is explicitly defined @@ -77,7 +80,10 @@ def get_version(self) -> str: def get_base_registry(self) -> str: """Get the base registry URL for the current scenario.""" # TODO CLOUDP-335471: when working on the promotion process, use the prod registry variable in RELEASE scenario - if self.scenario == BuildScenario.STAGING: - return os.environ.get("STAGING_REPO_URL") - else: - return os.environ.get("BASE_REPO_URL") + # TODO CLOUDP-335471: STAGING scenario should also push to STAGING_REPO_URL with version_id tag, + # in addition to the current ECR dev latest push (for backward compatibility) + # This will enable proper staging environment testing before production releases + + # For now, always use BASE_REPO_URL to preserve legacy behavior + # (STAGING pushes to ECR dev with "latest" tag) + return os.environ.get("BASE_REPO_URL") From 1badff0183576a697aaab94ea1530951b7c52914 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Fri, 8 Aug 2025 11:38:40 +0200 Subject: [PATCH 111/164] Ensure builder in main to fix race conditions --- scripts/release/build_context.py | 2 +- scripts/release/build_images.py | 17 +++++++++++------ scripts/release/pipeline_main.py | 5 +++++ 3 files changed, 17 insertions(+), 7 deletions(-) diff --git a/scripts/release/build_context.py b/scripts/release/build_context.py index ee9f6f0ed..d00d8de37 100644 --- a/scripts/release/build_context.py +++ b/scripts/release/build_context.py @@ -83,7 +83,7 @@ def get_base_registry(self) -> str: # TODO CLOUDP-335471: STAGING scenario should also push to STAGING_REPO_URL with version_id tag, # in addition to the current ECR dev latest push (for backward compatibility) # This will enable proper staging environment testing before production releases - + # For now, always use BASE_REPO_URL to preserve legacy behavior # (STAGING pushes to ECR dev with "latest" tag) return os.environ.get("BASE_REPO_URL") diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py index e2a43683b..01e2f1d45 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build_images.py @@ -10,6 +10,8 @@ import docker from lib.base_logger import logger +DEFAULT_BUILDER_NAME = "multiarch" # Default buildx builder name + def ecr_login_boto3(region: str, account_id: str): """ @@ -38,7 +40,7 @@ def ecr_login_boto3(region: str, account_id: str): logger.debug(f"ECR login succeeded: {status}") -def ensure_buildx_builder(builder_name: str = "multiarch") -> str: +def ensure_buildx_builder(builder_name: str = DEFAULT_BUILDER_NAME) -> str: """ Ensures a Docker Buildx builder exists for multi-platform builds. 
@@ -70,7 +72,13 @@ def ensure_buildx_builder(builder_name: str = "multiarch") -> str: def execute_docker_build( - tag: str, dockerfile: str, path: str, args: Dict[str, str] = {}, push: bool = True, platforms: list[str] = None + tag: str, + dockerfile: str, + path: str, + args: Dict[str, str] = {}, + push: bool = True, + platforms: list[str] = None, + builder_name: str = DEFAULT_BUILDER_NAME, ): """ Build a Docker image using python_on_whales and Docker Buildx for multi-architecture support. @@ -105,10 +113,7 @@ def execute_docker_build( if len(platforms) > 1: logger.info(f"Multi-platform build for {len(platforms)} architectures") - # We need a special driver to handle multi-platform builds - builder_name = ensure_buildx_builder("multiarch") - - # Build the image using buildx + # Build the image using buildx, builder must be already initialized docker.buildx.build( context_path=path, file=dockerfile, diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index 3f7b9473d..e3b32aaaa 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -34,6 +34,7 @@ BuildContext, BuildScenario, ) +from scripts.release.build_images import DEFAULT_BUILDER_NAME, ensure_buildx_builder """ The goal of main.py, build_configuration.py and build_context.py is to provide a single source of truth for the build @@ -145,6 +146,10 @@ def main(): logger.info(f"Building image: {args.image}") logger.info(f"Build configuration: {build_config}") + # Create buildx builder + # It must be initialized here as opposed to in build_images.py so that parallel calls (such as agent builds) can access it + # and not face race conditions + ensure_buildx_builder(DEFAULT_BUILDER_NAME) build_image(args.image, build_config) From 9e2815ad81ea58c60b8887f9508d8141b913de47 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Fri, 8 Aug 2025 11:49:08 +0200 Subject: [PATCH 112/164] Log line --- scripts/release/build_images.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py index 01e2f1d45..8d26962b9 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build_images.py @@ -50,6 +50,7 @@ def ensure_buildx_builder(builder_name: str = DEFAULT_BUILDER_NAME) -> str: docker = python_on_whales.docker + logger.info(f"Ensuring buildx builder '{builder_name}' exists...") existing_builders = docker.buildx.list() if any(b.name == builder_name for b in existing_builders): logger.info(f"Builder '{builder_name}' already exists – reusing it.") From e17b32356b779ce9c75cc962f27b296be9bbf7ef Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Fri, 8 Aug 2025 11:50:26 +0200 Subject: [PATCH 113/164] Remove unused is_running_in_evg_pipeline --- scripts/release/atomic_pipeline.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index f0ca02e00..5bb466237 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -37,10 +37,6 @@ def get_tools_distro(tools_version: str) -> Dict[str, str]: return default_distro -def is_running_in_evg_pipeline(): - return os.getenv("RUNNING_IN_EVG", "") == "true" - - def load_release_file() -> Dict: with open("release.json") as release: return json.load(release) From 075fcae6936f5d2e6b7c4d332310bdb3e6c864ef Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Fri, 8 Aug 2025 12:01:22 +0200 Subject: [PATCH 114/164] Typo from merge conflict --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/Makefile b/Makefile index 056bb0083..728721da5 100644 --- a/Makefile +++ b/Makefile @@ -181,7 +181,7 @@ build-and-push-images: build-and-push-operator-image appdb-init-image om-init-im build-and-push-init-images: appdb-init-image om-init-image database-init-image database-init-image: - @ scripts/dev/run_python.sh scripts/release/pipeline_main.puy init-database + @ scripts/dev/run_python.sh scripts/release/pipeline_main.py init-database appdb-init-image: @ scripts/dev/run_python.sh scripts/release/pipeline_main.py init-appdb From afc9b7995a67393500bfc5aa2dd069b21ab0c1d3 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Fri, 8 Aug 2025 12:03:18 +0200 Subject: [PATCH 115/164] Follow up TODO --- scripts/release/build_images.py | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py index 8d26962b9..8b9404eb8 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build_images.py @@ -92,6 +92,7 @@ def execute_docker_build( :param platforms: List of target platforms (e.g., ["linux/amd64", "linux/arm64"]) """ # Login to ECR before building + # TODO CLOUDP-335471: use env variables to configure AWS region and account ID ecr_login_boto3(region="us-east-1", account_id="268558157000") docker = python_on_whales.docker From 3ef9e2c0787662dd5813ce8c1a5ba9c4014f5cfd Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Fri, 8 Aug 2025 13:02:25 +0200 Subject: [PATCH 116/164] Login for garasign image --- scripts/release/atomic_pipeline.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 5bb466237..b1473aa2d 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -18,6 +18,7 @@ from lib.base_logger import logger from scripts.evergreen.release.images_signing import ( + mongodb_artifactory_login, sign_image, verify_signature, ) @@ -254,6 +255,8 @@ def build_image( ) if build_configuration.sign: + logger.info("Logging in MongoDB Artifactory for Garasign image") + mongodb_artifactory_login() logger.info("Signing image") sign_image(docker_registry, build_configuration.version) verify_signature(docker_registry, build_configuration.version) From d2a61532a9bc9c67e24c68017b14be8f03804dcc Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Fri, 8 Aug 2025 13:04:34 +0200 Subject: [PATCH 117/164] Handle builder creation race condition with an exception --- scripts/release/build_images.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py index 8b9404eb8..d998d44a8 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build_images.py @@ -43,6 +43,7 @@ def ecr_login_boto3(region: str, account_id: str): def ensure_buildx_builder(builder_name: str = DEFAULT_BUILDER_NAME) -> str: """ Ensures a Docker Buildx builder exists for multi-platform builds. + This function is safe for concurrent execution across multiple processes. 
:param builder_name: Name for the buildx builder :return: The builder name that was created or reused @@ -66,6 +67,13 @@ def ensure_buildx_builder(builder_name: str = DEFAULT_BUILDER_NAME) -> str: ) logger.info(f"Created new buildx builder: {builder_name}") except DockerException as e: + # Check if this is a race condition (another process created the builder) + if hasattr(e, 'stderr') and 'existing instance for' in str(e.stderr): + logger.info(f"Builder '{builder_name}' was created by another process – using it.") + docker.buildx.use(builder_name) + return builder_name + + # Otherwise, it's a real error logger.error(f"Failed to create buildx builder: {e}") raise From c6fc163b00036c5f0908683a44a22ecf33afaedb Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Fri, 8 Aug 2025 13:09:30 +0200 Subject: [PATCH 118/164] Cleanup ensure --- scripts/release/build_images.py | 8 +++++++- scripts/release/pipeline_main.py | 5 ----- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py index d998d44a8..95347f073 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build_images.py @@ -1,5 +1,8 @@ # This file is the new Sonar import base64 +import fcntl +import os +import time from typing import Dict import boto3 @@ -123,7 +126,10 @@ def execute_docker_build( if len(platforms) > 1: logger.info(f"Multi-platform build for {len(platforms)} architectures") - # Build the image using buildx, builder must be already initialized + # Ensure buildx builder exists (safe for concurrent execution) + ensure_buildx_builder(builder_name) + + # Build the image using buildx docker.buildx.build( context_path=path, file=dockerfile, diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index e3b32aaaa..3f7b9473d 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -34,7 +34,6 @@ BuildContext, BuildScenario, ) -from scripts.release.build_images import DEFAULT_BUILDER_NAME, ensure_buildx_builder """ The goal of main.py, build_configuration.py and build_context.py is to provide a single source of truth for the build @@ -146,10 +145,6 @@ def main(): logger.info(f"Building image: {args.image}") logger.info(f"Build configuration: {build_config}") - # Create buildx builder - # It must be initialized here as opposed to in build_images.py so that parallel calls (such as agent builds) can access it - # and not face race conditions - ensure_buildx_builder(DEFAULT_BUILDER_NAME) build_image(args.image, build_config) From 5b426636469c1bbc1f51ce7f91f08360f3067482 Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Fri, 8 Aug 2025 16:09:11 +0300 Subject: [PATCH 119/164] Add test image building --- .evergreen-functions.yml | 12 ++++++++++++ .evergreen.yml | 10 ++++++++++ scripts/evergreen/e2e/build_e2e_image_ibm.sh | 14 ++++++++++++++ scripts/evergreen/e2e/single_e2e.sh | 15 +++++++++++++++ 4 files changed, 51 insertions(+) create mode 100755 scripts/evergreen/e2e/build_e2e_image_ibm.sh diff --git a/.evergreen-functions.yml b/.evergreen-functions.yml index 6deabc65c..6e16afdf2 100644 --- a/.evergreen-functions.yml +++ b/.evergreen-functions.yml @@ -527,6 +527,18 @@ functions: - ${workdir}/bin - ${workdir} + build_test_image_ibm: + - *switch_context + - command: subprocess.exec + params: + shell: bash + working_dir: src/github.com/mongodb/mongodb-kubernetes + include_expansions_in_env: + - version_id + add_to_path: + - ${workdir}/bin + binary: scripts/evergreen/e2e/build_e2e_image_ibm.sh + pipeline: 
- *switch_context - command: shell.exec diff --git a/.evergreen.yml b/.evergreen.yml index f46e46b5a..876abe374 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -82,6 +82,8 @@ variables: setup_group: - func: clone - func: setup_building_host_minikube + - func: build_multi_cluster_binary + - func: build_test_image_ibm - &setup_group_multi_cluster setup_group_can_fail_task: true @@ -431,6 +433,14 @@ tasks: vars: image_name: test + + - name: build_test_image_ibm + commands: + - func: clone + - func: setup_building_host + - func: build_multi_cluster_binary + - func: build_test_image_ibm + - name: build_mco_test_image commands: - func: clone diff --git a/scripts/evergreen/e2e/build_e2e_image_ibm.sh b/scripts/evergreen/e2e/build_e2e_image_ibm.sh new file mode 100755 index 000000000..66aad76c3 --- /dev/null +++ b/scripts/evergreen/e2e/build_e2e_image_ibm.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash +source scripts/dev/set_env_context.sh + +cp -rf public docker/mongodb-kubernetes-tests/public +cp release.json docker/mongodb-kubernetes-tests/release.json +cp requirements.txt docker/mongodb-kubernetes-tests/requirements.txt +cp -rf helm_chart docker/mongodb-kubernetes-tests/helm_chart + +echo "Building mongodb-kubernetes-tests image with tag: ${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}" +cd docker/mongodb-kubernetes-tests +sudo podman buildx build --progress plain . -f Dockerfile -t "${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}-$(arch)" --build-arg PYTHON_VERSION="${PYTHON_VERSION}" +sudo podman push --authfile="/root/.config/containers/auth.json" "${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}-$(arch)" + +# docker buildx imagetools create "${BASE_REPO_URL}mongodb-kubernetes-tests:${version_id}" --append "${BASE_REPO_URL}mongodb-kubernetes-tests:${version_id}-$(arch)" -t "${BASE_REPO_URL}mongodb-kubernetes-tests:${version_id}" diff --git a/scripts/evergreen/e2e/single_e2e.sh b/scripts/evergreen/e2e/single_e2e.sh index 1adb8476c..f14cf9ab9 100755 --- a/scripts/evergreen/e2e/single_e2e.sh +++ b/scripts/evergreen/e2e/single_e2e.sh @@ -27,6 +27,21 @@ deploy_test_app() { tag="${OVERRIDE_VERSION_ID}" fi + local arch + arch=$(uname -m) + + case "${arch}" in + ppc64le) + tag="${tag}-ppc64le" + ;; + s390x) + tag="${tag}-s390x" + ;; + *) + echo "Not IBM host, using default tag" + ;; + esac + IS_PATCH="${IS_PATCH:-default_patch}" TASK_NAME="${TASK_NAME:-default_task}" EXECUTION="${EXECUTION:-default_execution}" From abd618021726c679d71a61a944736fb0158d5f59 Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Fri, 8 Aug 2025 16:09:18 +0300 Subject: [PATCH 120/164] Fix dockerfile --- docker/mongodb-kubernetes-database/Dockerfile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docker/mongodb-kubernetes-database/Dockerfile b/docker/mongodb-kubernetes-database/Dockerfile index 43f705fdd..97fbda8d0 100644 --- a/docker/mongodb-kubernetes-database/Dockerfile +++ b/docker/mongodb-kubernetes-database/Dockerfile @@ -41,8 +41,7 @@ RUN microdnf install -y --disableplugin=subscription-manager \ jq \ tar \ xz-libs \ - findutils \ - cpio + findutils RUN ln -s /usr/lib64/libsasl2.so.3 /usr/lib64/libsasl2.so.2 From 347b44bc5d21fb1e737b4a175a6ab652af187f12 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Fri, 8 Aug 2025 15:17:52 +0200 Subject: [PATCH 121/164] Revert "Handle builder creation race condition with an exception" This reverts commit d2a61532a9bc9c67e24c68017b14be8f03804dcc. 
--- scripts/release/build_images.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py index 95347f073..89596bb87 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build_images.py @@ -46,7 +46,6 @@ def ecr_login_boto3(region: str, account_id: str): def ensure_buildx_builder(builder_name: str = DEFAULT_BUILDER_NAME) -> str: """ Ensures a Docker Buildx builder exists for multi-platform builds. - This function is safe for concurrent execution across multiple processes. :param builder_name: Name for the buildx builder :return: The builder name that was created or reused @@ -70,13 +69,6 @@ def ensure_buildx_builder(builder_name: str = DEFAULT_BUILDER_NAME) -> str: ) logger.info(f"Created new buildx builder: {builder_name}") except DockerException as e: - # Check if this is a race condition (another process created the builder) - if hasattr(e, 'stderr') and 'existing instance for' in str(e.stderr): - logger.info(f"Builder '{builder_name}' was created by another process – using it.") - docker.buildx.use(builder_name) - return builder_name - - # Otherwise, it's a real error logger.error(f"Failed to create buildx builder: {e}") raise From e2a41260031496a107ff421ae286dc7c1ff8db1c Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Fri, 8 Aug 2025 15:20:50 +0200 Subject: [PATCH 122/164] Revert "Cleanup ensure" This reverts commit c6fc163b00036c5f0908683a44a22ecf33afaedb. --- scripts/release/build_images.py | 8 +------- scripts/release/pipeline_main.py | 5 +++++ 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/scripts/release/build_images.py b/scripts/release/build_images.py index 89596bb87..8b9404eb8 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build_images.py @@ -1,8 +1,5 @@ # This file is the new Sonar import base64 -import fcntl -import os -import time from typing import Dict import boto3 @@ -118,10 +115,7 @@ def execute_docker_build( if len(platforms) > 1: logger.info(f"Multi-platform build for {len(platforms)} architectures") - # Ensure buildx builder exists (safe for concurrent execution) - ensure_buildx_builder(builder_name) - - # Build the image using buildx + # Build the image using buildx, builder must be already initialized docker.buildx.build( context_path=path, file=dockerfile, diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index 3f7b9473d..e3b32aaaa 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -34,6 +34,7 @@ BuildContext, BuildScenario, ) +from scripts.release.build_images import DEFAULT_BUILDER_NAME, ensure_buildx_builder """ The goal of main.py, build_configuration.py and build_context.py is to provide a single source of truth for the build @@ -145,6 +146,10 @@ def main(): logger.info(f"Building image: {args.image}") logger.info(f"Build configuration: {build_config}") + # Create buildx builder + # It must be initialized here as opposed to in build_images.py so that parallel calls (such as agent builds) can access it + # and not face race conditions + ensure_buildx_builder(DEFAULT_BUILDER_NAME) build_image(args.image, build_config) From 715c4adb81c0e87d7346fafc64f0ac3b374ccc16 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Fri, 8 Aug 2025 15:21:13 +0200 Subject: [PATCH 123/164] Rename trace --- scripts/release/atomic_pipeline.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 
b1473aa2d..a3b5c8479 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -212,7 +212,7 @@ def build_om_image(build_configuration: BuildConfiguration): ) -@TRACER.start_as_current_span("build_image_generic") +@TRACER.start_as_current_span("build_image") def build_image( image_name: str, dockerfile_path: str, From 2125cb750865cd80d7efba6963044cd68a9cd2e0 Mon Sep 17 00:00:00 2001 From: Julien Benhaim Date: Fri, 8 Aug 2025 15:41:23 +0200 Subject: [PATCH 124/164] Remove comment --- scripts/release/atomic_pipeline.py | 1 - 1 file changed, 1 deletion(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index a3b5c8479..f24342ea5 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -223,7 +223,6 @@ def build_image( """ Build an image then (optionally) sign the result. """ - # Tracing setup span = trace.get_current_span() span.set_attribute("mck.image_name", image_name) From fa9c7eee495b53181bf95c5a9399801a347fc9c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Mon, 11 Aug 2025 11:52:18 +0200 Subject: [PATCH 125/164] Review fixes --- scripts/release/atomic_pipeline.py | 2 +- scripts/release/build/image_signing.py | 6 ++---- scripts/release/pipeline_main.py | 24 ++++++++++++------------ 3 files changed, 15 insertions(+), 17 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index a7adc09a1..7baab0598 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -27,7 +27,7 @@ TRACER = trace.get_tracer("evergreen-agent") -@TRACER.start_as_current_span("build_image_generic") +@TRACER.start_as_current_span("build_image") def build_image( dockerfile_path: str, build_configuration: ImageBuildConfiguration, diff --git a/scripts/release/build/image_signing.py b/scripts/release/build/image_signing.py index d50116bb8..6bca81db7 100644 --- a/scripts/release/build/image_signing.py +++ b/scripts/release/build/image_signing.py @@ -230,8 +230,7 @@ def verify_signature(repository: str, tag: str): # Access the content of the file kubernetes_operator_public_key = r.text else: - logger.error(f"Failed to retrieve the public key from {public_key_url}: Status code {r.status_code}") - return + raise Exception(f"Failed to retrieve the public key from {public_key_url}: Status code {r.status_code}") public_key_var_name = "OPERATOR_PUBLIC_KEY" additional_args = [ @@ -245,8 +244,7 @@ def verify_signature(repository: str, tag: str): run_command_with_retries(command, retries=10) except subprocess.CalledProcessError as e: # Fail the pipeline if verification fails - logger.error(f"Failed to verify signature for image {image}: {e.stderr}") - raise + raise Exception(f"Failed to verify signature for image {image}") end_time = time.time() duration = end_time - start_time diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index 49009a488..3e2ff736b 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -65,19 +65,19 @@ def get_builder_function_for_image_name() -> Dict[str, Callable]: """Returns a dictionary of image names that can be built.""" image_builders = { - MEKO_TESTS_IMAGE: build_tests_image, # working - OPERATOR_IMAGE: build_operator_image, # working - MCO_TESTS_IMAGE: build_mco_tests_image, # working - READINESS_PROBE_IMAGE: build_readiness_probe_image, # working - UPGRADE_HOOK_IMAGE: build_upgrade_hook_image, # working - DATABASE_IMAGE: 
build_database_image, # working - AGENT_IMAGE: build_agent_default_case, # working + MEKO_TESTS_IMAGE: build_tests_image, + OPERATOR_IMAGE: build_operator_image, + MCO_TESTS_IMAGE: build_mco_tests_image, + READINESS_PROBE_IMAGE: build_readiness_probe_image, + UPGRADE_HOOK_IMAGE: build_upgrade_hook_image, + DATABASE_IMAGE: build_database_image, + AGENT_IMAGE: build_agent_default_case, # Init images - INIT_APPDB_IMAGE: build_init_appdb_image, # working - INIT_DATABASE_IMAGE: build_init_database_image, # working - INIT_OPS_MANAGER_IMAGE: build_init_om_image, # working + INIT_APPDB_IMAGE: build_init_appdb_image, + INIT_DATABASE_IMAGE: build_init_database_image, + INIT_OPS_MANAGER_IMAGE: build_init_om_image, # Ops Manager image - OPS_MANAGER_IMAGE: build_om_image, # working + OPS_MANAGER_IMAGE: build_om_image, } return image_builders @@ -189,7 +189,7 @@ def main(): # Override arguments for build context and configuration parser.add_argument( "--platform", - help="Override the platforms instead of resolving from build scenario", + help="Override the platforms instead of resolving from build scenario. Multi-arch builds are comma-separated. Example: linux/amd64,linux/arm64", ) parser.add_argument( "--version", From 597edf26139335ddf5c2db1dfddfdf3551521277 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Mon, 11 Aug 2025 12:08:27 +0200 Subject: [PATCH 126/164] Added comment to get_version_id() method --- scripts/release/constants.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/release/constants.py b/scripts/release/constants.py index 661e13222..dc72cd7d9 100644 --- a/scripts/release/constants.py +++ b/scripts/release/constants.py @@ -29,4 +29,8 @@ def is_running_in_evg() -> bool: def get_version_id() -> str | None: + """ + Get the version ID from the environment variable. This is typically used for patch builds in the Evergreen CI system. 
+ :return: version_id (patch ID) or None if not set + """ return os.getenv("version_id") From 857f705bc22e08b436008b8f796adfbb2f241a1a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Mon, 11 Aug 2025 12:10:45 +0200 Subject: [PATCH 127/164] Revert parallel `max_workers = 1` --- scripts/release/atomic_pipeline.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 3ac097819..190009dc7 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -301,8 +301,7 @@ def build_agent_default_case(build_configuration: ImageBuildConfiguration): tasks_queue = Queue() max_workers = 1 if build_configuration.parallel: - # TODO: remove this once we have a proper synchronization for buildx builder concurrent creation - max_workers = 1 + max_workers = None if build_configuration.parallel_factor > 0: max_workers = build_configuration.parallel_factor with ProcessPoolExecutor(max_workers=max_workers) as executor: From 59e442030e189f0ef54582dcfa15fcbba70d4350 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20Kara=C5=9B?= Date: Mon, 11 Aug 2025 12:26:59 +0200 Subject: [PATCH 128/164] Agent image fix --- scripts/release/atomic_pipeline.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 190009dc7..6b43f3011 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -430,7 +430,7 @@ def build_agent_pipeline( } build_image( - dockerfile_path="docker/mongodb-agent/Dockerfile", + dockerfile_path="docker/mongodb-agent/Dockerfile.atomic", build_configuration=build_configuration_copy, build_args=args, ) From 1385fa5b15e676f2dd03f6bb6174d2f4e8fda264 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Mon, 11 Aug 2025 15:02:47 +0200 Subject: [PATCH 129/164] add agent multi-arch buildinfo --- build_info.json | 64 ++++++-- build_info_agent.json | 42 +++++ docker/mongodb-agent/README.md | 19 ++- scripts/release/atomic_pipeline.py | 77 ++++++--- scripts/release/atomic_pipeline_test.py | 198 ++++++++++++++++++++++++ 5 files changed, 355 insertions(+), 45 deletions(-) create mode 100644 build_info_agent.json create mode 100644 scripts/release/atomic_pipeline_test.py diff --git a/build_info.json b/build_info.json index c84c3bfd6..ff1ee8699 100644 --- a/build_info.json +++ b/build_info.json @@ -12,7 +12,9 @@ "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "release": { @@ -20,7 +22,9 @@ "repository": "quay.io/mongodb/mongodb-kubernetes", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] } }, @@ -36,7 +40,9 @@ "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-database", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "release": { @@ -44,7 +50,9 @@ "repository": "quay.io/mongodb/mongodb-kubernetes-init-database", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] } }, @@ -60,7 +68,9 @@ "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-appdb", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "release": { @@ -68,7 +78,9 @@ "repository": 
"quay.io/mongodb/mongodb-kubernetes-init-appdb", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] } }, @@ -84,7 +96,9 @@ "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-ops-manager", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "release": { @@ -92,7 +106,9 @@ "repository": "quay.io/mongodb/mongodb-kubernetes-init-ops-manager", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] } }, @@ -108,7 +124,9 @@ "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-database", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "release": { @@ -116,7 +134,9 @@ "repository": "quay.io/mongodb/mongodb-kubernetes-database", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] } }, @@ -160,7 +180,9 @@ "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-readinessprobe", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "release": { @@ -169,7 +191,9 @@ "repository": "quay.io/mongodb/mongodb-kubernetes-readinessprobe", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] } }, @@ -185,7 +209,9 @@ "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-operator-version-upgrade-post-start-hook", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "release": { @@ -194,7 +220,9 @@ "repository": "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] } }, @@ -210,7 +238,9 @@ "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-agent-ubi", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "release": { @@ -218,7 +248,9 @@ "repository": "quay.io/mongodb/mongodb-agent-ubi", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] } }, diff --git a/build_info_agent.json b/build_info_agent.json new file mode 100644 index 000000000..f10b02dc2 --- /dev/null +++ b/build_info_agent.json @@ -0,0 +1,42 @@ +{ + "platform_mappings": { + "linux/amd64": { + "agent_suffix": "linux_x86_64.tar.gz", + "tools_suffix": "rhel93-x86_64-{TOOLS_VERSION}.tgz" + }, + "linux/arm64": { + "agent_suffix": "amzn2_aarch64.tar.gz", + "tools_suffix": "rhel93-aarch64-{TOOLS_VERSION}.tgz" + }, + "linux/s390x": { + "agent_suffix": "rhel7_s390x.tar.gz", + "tools_suffix": "rhel9-s390x-{TOOLS_VERSION}.tgz" + }, + "linux/ppc64le": { + "agent_suffix": "rhel8_ppc64le.tar.gz", + "tools_suffix": "rhel9-ppc64le-{TOOLS_VERSION}.tgz" + } + }, + "base_names": { + "agent": "mongodb-mms-automation-agent", + "tools": "mongodb-database-tools" + }, + "build_arg_mappings": { + "linux/amd64": { + "agent_build_arg": "mongodb_agent_version_amd64", + "tools_build_arg": "mongodb_tools_version_amd64" + }, + "linux/arm64": { + "agent_build_arg": "mongodb_agent_version_arm64", + "tools_build_arg": "mongodb_tools_version_arm64" + }, + "linux/s390x": { + "agent_build_arg": "mongodb_agent_version_s390x", + "tools_build_arg": "mongodb_tools_version_s390x" + }, + "linux/ppc64le": { + "agent_build_arg": 
"mongodb_agent_version_ppc64le", + "tools_build_arg": "mongodb_tools_version_ppc64le" + } + } +} diff --git a/docker/mongodb-agent/README.md b/docker/mongodb-agent/README.md index 91fc2ca1f..f8ec6ea20 100644 --- a/docker/mongodb-agent/README.md +++ b/docker/mongodb-agent/README.md @@ -15,20 +15,23 @@ MONGODB_TOOLS_URL="https://downloads.mongodb.org/tools/db" MONGODB_AGENT_URL="https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/" INIT_DATABASE_IMAGE="${BASE_REPO_URL}mongodb-kubernetes-init-database:${VERSION}" +MONGODB_AGENT_BASE="mongodb-mms-automation-agent" +MONGODB_DATABASE_TOOLS_BASE="mongodb-database-tools" + docker buildx build --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . -f docker/mongodb-agent/Dockerfile -t "${BASE_REPO_URL}mongodb-agent:${AGENT_VERSION}_${VERSION}" \ --build-arg version="${VERSION}" \ --build-arg init_database_image="${INIT_DATABASE_IMAGE}" \ --build-arg mongodb_tools_url="${MONGODB_TOOLS_URL}" \ --build-arg mongodb_agent_url="${MONGODB_AGENT_URL}" \ - --build-arg mongodb_agent_version_s390x="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel7_s390x.tar.gz" \ - --build-arg mongodb_agent_version_ppc64le="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel8_ppc64le.tar.gz" \ - --build-arg mongodb_agent_version_amd64="mongodb-mms-automation-agent-${AGENT_VERSION}.linux_x86_64.tar.gz" \ - --build-arg mongodb_agent_version_arm64="mongodb-mms-automation-agent-${AGENT_VERSION}.amzn2_aarch64.tar.gz" \ - --build-arg mongodb_tools_version_arm64="mongodb-database-tools-rhel93-aarch64-${TOOLS_VERSION}.tgz" \ - --build-arg mongodb_tools_version_amd64="mongodb-database-tools-rhel93-x86_64-${TOOLS_VERSION}.tgz" \ - --build-arg mongodb_tools_version_s390x="mongodb-database-tools-rhel9-s390x-${TOOLS_VERSION}.tgz" \ - --build-arg mongodb_tools_version_ppc64le="mongodb-database-tools-rhel9-ppc64le-${TOOLS_VERSION}.tgz" + --build-arg mongodb_agent_version_s390x="${MONGODB_AGENT_BASE}-${AGENT_VERSION}.rhel7_s390x.tar.gz" \ + --build-arg mongodb_agent_version_ppc64le="${MONGODB_AGENT_BASE}-${AGENT_VERSION}.rhel8_ppc64le.tar.gz" \ + --build-arg mongodb_agent_version_amd64="${MONGODB_AGENT_BASE}-${AGENT_VERSION}.linux_x86_64.tar.gz" \ + --build-arg mongodb_agent_version_arm64="${MONGODB_AGENT_BASE}-${AGENT_VERSION}.amzn2_aarch64.tar.gz" \ + --build-arg mongodb_tools_version_arm64="${MONGODB_DATABASE_TOOLS_BASE}-rhel93-aarch64-${TOOLS_VERSION}.tgz" \ + --build-arg mongodb_tools_version_amd64="${MONGODB_DATABASE_TOOLS_BASE}-rhel93-x86_64-${TOOLS_VERSION}.tgz" \ + --build-arg mongodb_tools_version_s390x="${MONGODB_DATABASE_TOOLS_BASE}-rhel9-s390x-${TOOLS_VERSION}.tgz" \ + --build-arg mongodb_tools_version_ppc64le="${MONGODB_DATABASE_TOOLS_BASE}-rhel9-ppc64le-${TOOLS_VERSION}.tgz" docker push "${BASE_REPO_URL}mongodb-agent:${AGENT_VERSION}_${VERSION}" diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 6b43f3011..8dcbec545 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -27,6 +27,47 @@ TRACER = trace.get_tracer("evergreen-agent") +def load_agent_build_info(): + """Load agent platform mappings from build_info_agent.json""" + with open("build_info_agent.json", "r") as f: + return json.load(f) + + +def generate_agent_build_args(platforms: List[str], agent_version: str, tools_version: str) -> Dict[str, str]: + """ + Generate build arguments for agent image 
based on platform mappings. + + Args: + platforms: List of platforms (e.g., ["linux/amd64", "linux/arm64"]) + agent_version: MongoDB agent version + tools_version: MongoDB tools version + + Returns: + Dictionary of build arguments for docker build + """ + agent_info = load_agent_build_info() + build_args = {} + + for platform in platforms: + if platform not in agent_info["platform_mappings"]: + logger.warning(f"Platform {platform} not found in agent mappings, skipping") + continue + + mapping = agent_info["platform_mappings"][platform] + build_mapping = agent_info["build_arg_mappings"][platform] + + # Generate agent build arg + agent_filename = f"{agent_info['base_names']['agent']}-{agent_version}.{mapping['agent_suffix']}" + build_args[build_mapping["agent_build_arg"]] = agent_filename + + # Generate tools build arg + tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version) + tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}" + build_args[build_mapping["tools_build_arg"]] = tools_filename + + return build_args + + @TRACER.start_as_current_span("build_image") def build_image( dockerfile_path: str, @@ -308,10 +349,8 @@ def build_agent_default_case(build_configuration: ImageBuildConfiguration): logger.info(f"Running with factor of {max_workers}") logger.info(f"======= Agent versions to build {agent_versions_to_build} =======") for idx, agent_tools_version in enumerate(agent_versions_to_build): - # We don't need to keep create and push the same image on every build. - # It is enough to create and push the non-operator suffixed images only during releases to ecr and quay. logger.info(f"======= Building Agent {agent_tools_version} ({idx}/{len(agent_versions_to_build)})") - _build_agent_operator( + _build_agent( agent_tools_version, build_configuration, executor, @@ -382,51 +421,47 @@ def gather_latest_agent_versions(release: Dict) -> List[Tuple[str, str]]: return sorted(list(set(agent_versions_to_build))) -def _build_agent_operator( +def _build_agent( agent_tools_version: Tuple[str, str], build_configuration: ImageBuildConfiguration, executor: ProcessPoolExecutor, tasks_queue: Queue, ): agent_version = agent_tools_version[0] - agent_distro = "rhel9_x86_64" tools_version = agent_tools_version[1] - tools_distro = get_tools_distro(tools_version)["amd"] tasks_queue.put( executor.submit( build_agent_pipeline, build_configuration, - build_configuration.version, agent_version, - agent_distro, - tools_version, - tools_distro, + tools_version ) ) def build_agent_pipeline( build_configuration: ImageBuildConfiguration, - operator_version: str, agent_version: str, - agent_distro: str, tools_version: str, - tools_distro: str, ): - image_version = f"{agent_version}_{operator_version}" - build_configuration_copy = copy(build_configuration) - build_configuration_copy.version = image_version + build_configuration_copy.version = agent_version print( - f"======== Building agent pipeline for version {image_version}, build configuration version: {build_configuration.version}" + f"======== Building agent pipeline for version {agent_version}, build configuration version: {build_configuration.version}" ) + + # Generate platform-specific build arguments using the mapping + platform_build_args = generate_agent_build_args( + platforms=build_configuration.platforms, + agent_version=agent_version, + tools_version=tools_version + ) + args = { - "version": image_version, + "version": agent_version, "agent_version": agent_version, - "agent_distro": agent_distro, - "tools_version": 
tools_version, - "tools_distro": tools_distro, + **platform_build_args # Add the platform-specific build args } build_image( diff --git a/scripts/release/atomic_pipeline_test.py b/scripts/release/atomic_pipeline_test.py new file mode 100644 index 000000000..795a19e42 --- /dev/null +++ b/scripts/release/atomic_pipeline_test.py @@ -0,0 +1,198 @@ +#!/usr/bin/env python3 +""" +Test for agent build mapping functionality in atomic_pipeline.py +""" + +import json +import unittest +from unittest.mock import patch + + +def load_agent_build_info(): + """Load agent platform mappings from build_info_agent.json""" + with open("build_info_agent.json", "r") as f: + return json.load(f) + +def generate_agent_build_args(platforms, agent_version, tools_version): + """ + Generate build arguments for agent image based on platform mappings. + This is the actual implementation from atomic_pipeline.py + """ + agent_info = load_agent_build_info() + build_args = {} + + for platform in platforms: + if platform not in agent_info["platform_mappings"]: + # Mock the logger warning for testing + print(f"Platform {platform} not found in agent mappings, skipping") + continue + + mapping = agent_info["platform_mappings"][platform] + build_mapping = agent_info["build_arg_mappings"][platform] + + # Generate agent build arg + agent_filename = f"{agent_info['base_names']['agent']}-{agent_version}.{mapping['agent_suffix']}" + build_args[build_mapping["agent_build_arg"]] = agent_filename + + # Generate tools build arg + tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version) + tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}" + build_args[build_mapping["tools_build_arg"]] = tools_filename + + return build_args + + +def _parse_dockerfile_build_args(dockerfile_path): + """Parse Dockerfile to extract expected build arguments using proper parsing.""" + build_args = set() + + with open(dockerfile_path, 'r') as f: + lines = f.readlines() + + for line in lines: + line = line.strip() + # Skip comments and empty lines + if not line or line.startswith('#'): + continue + + # Parse ARG instructions + if line.startswith('ARG '): + arg_part = line[4:].strip() # Remove 'ARG ' + + # Handle ARG with default values (ARG name=default) + arg_name = arg_part.split('=')[0].strip() + + build_args.add(arg_name) + + return build_args + + +class TestAgentBuildMapping(unittest.TestCase): + """Test cases for agent build mapping functionality.""" + + def setUp(self): + """Set up test fixtures.""" + # Load the actual build_info_agent.json file + with open("build_info_agent.json", "r") as f: + self.agent_build_info = json.load(f) + + def test_generate_agent_build_args_single_platform(self): + """Test generating build args for a single platform.""" + platforms = ["linux/amd64"] + agent_version = "108.0.7.8810-1" + tools_version = "100.12.0" + + result = generate_agent_build_args(platforms, agent_version, tools_version) + + expected = { + "mongodb_agent_version_amd64": "mongodb-mms-automation-agent-108.0.7.8810-1.linux_x86_64.tar.gz", + "mongodb_tools_version_amd64": "mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" + } + + self.assertEqual(result, expected) + + def test_generate_agent_build_args_multiple_platforms(self): + """Test generating build args for multiple platforms.""" + platforms = ["linux/amd64", "linux/arm64", "linux/s390x", "linux/ppc64le"] + agent_version = "108.0.7.8810-1" + tools_version = "100.12.0" + + result = generate_agent_build_args(platforms, agent_version, tools_version) + + expected = { + 
"mongodb_agent_version_amd64": "mongodb-mms-automation-agent-108.0.7.8810-1.linux_x86_64.tar.gz", + "mongodb_tools_version_amd64": "mongodb-database-tools-rhel93-x86_64-100.12.0.tgz", + "mongodb_agent_version_arm64": "mongodb-mms-automation-agent-108.0.7.8810-1.amzn2_aarch64.tar.gz", + "mongodb_tools_version_arm64": "mongodb-database-tools-rhel93-aarch64-100.12.0.tgz", + "mongodb_agent_version_s390x": "mongodb-mms-automation-agent-108.0.7.8810-1.rhel7_s390x.tar.gz", + "mongodb_tools_version_s390x": "mongodb-database-tools-rhel9-s390x-100.12.0.tgz", + "mongodb_agent_version_ppc64le": "mongodb-mms-automation-agent-108.0.7.8810-1.rhel8_ppc64le.tar.gz", + "mongodb_tools_version_ppc64le": "mongodb-database-tools-rhel9-ppc64le-100.12.0.tgz" + } + + self.assertEqual(result, expected) + + @patch('builtins.print') + def test_generate_agent_build_args_unknown_platform(self, mock_print): + """Test handling of unknown platforms.""" + platforms = ["linux/amd64", "linux/unknown"] + agent_version = "108.0.7.8810-1" + tools_version = "100.12.0" + + result = generate_agent_build_args(platforms, agent_version, tools_version) + + # Should only include known platform + expected = { + "mongodb_agent_version_amd64": "mongodb-mms-automation-agent-108.0.7.8810-1.linux_x86_64.tar.gz", + "mongodb_tools_version_amd64": "mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" + } + + self.assertEqual(result, expected) + mock_print.assert_called_once_with("Platform linux/unknown not found in agent mappings, skipping") + + def test_generate_agent_build_args_empty_platforms(self): + """Test generating build args with empty platforms list.""" + platforms = [] + agent_version = "108.0.7.8810-1" + tools_version = "100.12.0" + + result = generate_agent_build_args(platforms, agent_version, tools_version) + + self.assertEqual(result, {}) + + def test_build_args_match_dockerfile_requirements(self): + """Test that generated build args exactly match what the Dockerfile expects.""" + # Define the expected build args based on the platforms we support + # This is cleaner than parsing the Dockerfile and more explicit about our expectations + expected_dockerfile_args = { + "mongodb_agent_version_amd64", "mongodb_agent_version_arm64", + "mongodb_agent_version_s390x", "mongodb_agent_version_ppc64le", + "mongodb_tools_version_amd64", "mongodb_tools_version_arm64", + "mongodb_tools_version_s390x", "mongodb_tools_version_ppc64le" + } + + # Generate build args for all platforms + platforms = ["linux/amd64", "linux/arm64", "linux/s390x", "linux/ppc64le"] + agent_version = "108.0.7.8810-1" + tools_version = "100.12.0" + + result = generate_agent_build_args(platforms, agent_version, tools_version) + generated_build_args = set(result.keys()) + + # Verify that we generate exactly the build args the Dockerfile expects + self.assertEqual(generated_build_args, expected_dockerfile_args, + f"Generated build args {generated_build_args} don't match expected {expected_dockerfile_args}") + + # Verify the format of generated filenames matches what Dockerfile expects + for arg_name, filename in result.items(): + if "agent" in arg_name: + self.assertTrue(filename.startswith("mongodb-mms-automation-agent-")) + self.assertTrue(filename.endswith(".tar.gz")) + elif "tools" in arg_name: + self.assertTrue(filename.startswith("mongodb-database-tools-")) + self.assertTrue(filename.endswith(".tgz")) + + def test_dockerfile_contains_expected_args(self): + """Test that the Dockerfile actually contains the build args we expect.""" + dockerfile_path = 
"docker/mongodb-agent/Dockerfile.atomic" + + # Read the Dockerfile content + with open(dockerfile_path, 'r') as f: + dockerfile_content = f.read() + + # Define the expected build args + expected_args = [ + "mongodb_agent_version_amd64", "mongodb_agent_version_arm64", + "mongodb_agent_version_s390x", "mongodb_agent_version_ppc64le", + "mongodb_tools_version_amd64", "mongodb_tools_version_arm64", + "mongodb_tools_version_s390x", "mongodb_tools_version_ppc64le" + ] + + # Verify each expected arg is declared in the Dockerfile + for arg_name in expected_args: + self.assertIn(f"ARG {arg_name}", dockerfile_content, + f"Dockerfile should contain 'ARG {arg_name}' declaration") + + +if __name__ == "__main__": + unittest.main() From 0cb392bb8e7aea36afa542aa96fa1fbab2c7f402 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Mon, 11 Aug 2025 15:58:27 +0200 Subject: [PATCH 130/164] add multi arch support for all binaries but not part of patch --- .evergreen.yml | 18 ++--- build_info_agent.json | 18 ----- scripts/release/atomic_pipeline.py | 102 ++++++++++++++++++++++-- scripts/release/atomic_pipeline_test.py | 74 ++++++++++++++++- 4 files changed, 177 insertions(+), 35 deletions(-) diff --git a/.evergreen.yml b/.evergreen.yml index ab4da441c..bd7764cb1 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -1488,9 +1488,9 @@ buildvariants: run_on: - rhel9-power-large allowed_requesters: [ "patch", "github_tag" ] -# depends_on: -# - name: build_test_image -# variant: init_test_run + depends_on: + - name: build_test_image + variant: init_test_run tasks: - name: e2e_smoke_ibm_task_group @@ -1500,9 +1500,9 @@ buildvariants: run_on: - rhel9-zseries-small allowed_requesters: [ "patch", "github_tag" ] -# depends_on: -# - name: build_test_image -# variant: init_test_run + depends_on: + - name: build_test_image + variant: init_test_run tasks: - name: e2e_smoke_ibm_task_group @@ -1512,9 +1512,9 @@ buildvariants: run_on: - ubuntu2204-arm64-large allowed_requesters: [ "patch", "github_tag" ] -# depends_on: -# - name: build_test_image -# variant: init_test_run + depends_on: + - name: build_test_image + variant: init_test_run tasks: - name: e2e_smoke_arm_task_group diff --git a/build_info_agent.json b/build_info_agent.json index f10b02dc2..f12cdda21 100644 --- a/build_info_agent.json +++ b/build_info_agent.json @@ -20,23 +20,5 @@ "base_names": { "agent": "mongodb-mms-automation-agent", "tools": "mongodb-database-tools" - }, - "build_arg_mappings": { - "linux/amd64": { - "agent_build_arg": "mongodb_agent_version_amd64", - "tools_build_arg": "mongodb_tools_version_amd64" - }, - "linux/arm64": { - "agent_build_arg": "mongodb_agent_version_arm64", - "tools_build_arg": "mongodb_tools_version_arm64" - }, - "linux/s390x": { - "agent_build_arg": "mongodb_agent_version_s390x", - "tools_build_arg": "mongodb_tools_version_s390x" - }, - "linux/ppc64le": { - "agent_build_arg": "mongodb_agent_version_ppc64le", - "tools_build_arg": "mongodb_tools_version_ppc64le" - } } } diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 8dcbec545..8d34d4f40 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -33,6 +33,73 @@ def load_agent_build_info(): return json.load(f) +def extract_tools_version_from_release(release: Dict) -> str: + """ + Extract tools version from release.json mongodbToolsBundle.ubi field. 
+ + Args: + release: Release dictionary from release.json + + Returns: + Tools version string (e.g., "100.12.2") + """ + tools_bundle = release["mongodbToolsBundle"]["ubi"] + # Extract version from filename like "mongodb-database-tools-rhel88-x86_64-100.12.2.tgz" + # The version is the last part before .tgz + version_part = tools_bundle.split("-")[-1] # Gets "100.12.2.tgz" + tools_version = version_part.replace(".tgz", "") # Gets "100.12.2" + return tools_version + + +def get_build_arg_names(platform: str) -> Dict[str, str]: + """ + Generate build argument names for a platform. + + Args: + platform: Platform string (e.g., "linux/amd64") + + Returns: + Dictionary with agent_build_arg and tools_build_arg keys + """ + # Extract architecture from platform (e.g., "amd64" from "linux/amd64") + arch = platform.split("/")[1] + + return { + "agent_build_arg": f"mongodb_agent_version_{arch}", + "tools_build_arg": f"mongodb_tools_version_{arch}" + } + + +def generate_tools_build_args(platforms: List[str], tools_version: str) -> Dict[str, str]: + """ + Generate build arguments for MongoDB tools based on platform mappings. + + Args: + platforms: List of platforms (e.g., ["linux/amd64", "linux/arm64"]) + tools_version: MongoDB tools version + + Returns: + Dictionary of build arguments for docker build (tools only) + """ + agent_info = load_agent_build_info() + build_args = {} + + for platform in platforms: + if platform not in agent_info["platform_mappings"]: + logger.warning(f"Platform {platform} not found in agent mappings, skipping") + continue + + mapping = agent_info["platform_mappings"][platform] + build_arg_names = get_build_arg_names(platform) + + # Generate tools build arg only + tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version) + tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}" + build_args[build_arg_names["tools_build_arg"]] = tools_filename + + return build_args + + def generate_agent_build_args(platforms: List[str], agent_version: str, tools_version: str) -> Dict[str, str]: """ Generate build arguments for agent image based on platform mappings. 
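As a concrete anchor between these hunks: with the platform mappings introduced in build_info_agent.json above, a two-platform call to generate_agent_build_args produces the following build args. A sketch using the same illustrative versions as the tests in this series:

# Illustrative call; filenames follow the rhel93/rhel9 suffixes defined in
# build_info_agent.json at this point in the series.
build_args = generate_agent_build_args(
    platforms=["linux/amd64", "linux/s390x"],
    agent_version="108.0.7.8810-1",
    tools_version="100.12.0",
)
assert build_args == {
    "mongodb_agent_version_amd64": "mongodb-mms-automation-agent-108.0.7.8810-1.linux_x86_64.tar.gz",
    "mongodb_tools_version_amd64": "mongodb-database-tools-rhel93-x86_64-100.12.0.tgz",
    "mongodb_agent_version_s390x": "mongodb-mms-automation-agent-108.0.7.8810-1.rhel7_s390x.tar.gz",
    "mongodb_tools_version_s390x": "mongodb-database-tools-rhel9-s390x-100.12.0.tgz",
}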
@@ -54,16 +121,16 @@ def generate_agent_build_args(platforms: List[str], agent_version: str, tools_ve continue mapping = agent_info["platform_mappings"][platform] - build_mapping = agent_info["build_arg_mappings"][platform] + build_arg_names = get_build_arg_names(platform) # Generate agent build arg agent_filename = f"{agent_info['base_names']['agent']}-{agent_version}.{mapping['agent_suffix']}" - build_args[build_mapping["agent_build_arg"]] = agent_filename + build_args[build_arg_names["agent_build_arg"]] = agent_filename # Generate tools build arg tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version) tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}" - build_args[build_mapping["tools_build_arg"]] = tools_filename + build_args[build_arg_names["tools_build_arg"]] = tools_filename return build_args @@ -276,7 +343,19 @@ def build_init_appdb_image(build_configuration: ImageBuildConfiguration): release = load_release_file() base_url = "https://fastdl.mongodb.org/tools/db/" mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) - args = {"version": build_configuration.version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} + + # Extract tools version and generate platform-specific build args + tools_version = extract_tools_version_from_release(release) + platform_build_args = generate_tools_build_args( + platforms=build_configuration.platforms, + tools_version=tools_version + ) + + args = { + "version": build_configuration.version, + "mongodb_tools_url": mongodb_tools_url_ubi, + **platform_build_args # Add the platform-specific build args + } build_image( dockerfile_path="docker/mongodb-kubernetes-init-appdb/Dockerfile.atomic", @@ -290,7 +369,20 @@ def build_init_database_image(build_configuration: ImageBuildConfiguration): release = load_release_file() base_url = "https://fastdl.mongodb.org/tools/db/" mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) - args = {"version": build_configuration.version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} + + # Extract tools version and generate platform-specific build args + tools_version = extract_tools_version_from_release(release) + platform_build_args = generate_tools_build_args( + platforms=build_configuration.platforms, + tools_version=tools_version + ) + + args = { + "version": build_configuration.version, + "mongodb_tools_url_ubi": mongodb_tools_url_ubi, + "mongodb_tools_url": base_url, # Add the base URL for the Dockerfile + **platform_build_args # Add the platform-specific build args + } build_image( "docker/mongodb-kubernetes-init-database/Dockerfile.atomic", diff --git a/scripts/release/atomic_pipeline_test.py b/scripts/release/atomic_pipeline_test.py index 795a19e42..776e9ea81 100644 --- a/scripts/release/atomic_pipeline_test.py +++ b/scripts/release/atomic_pipeline_test.py @@ -7,12 +7,34 @@ import unittest from unittest.mock import patch +from scripts.release.atomic_pipeline import generate_tools_build_args + + +# Local implementations to avoid import issues + def load_agent_build_info(): """Load agent platform mappings from build_info_agent.json""" with open("build_info_agent.json", "r") as f: return json.load(f) + +def get_build_arg_names(platform): + """Generate build argument names for a platform.""" + arch = platform.split("/")[1] + return { + "agent_build_arg": f"mongodb_agent_version_{arch}", + "tools_build_arg": f"mongodb_tools_version_{arch}" + } + + +def extract_tools_version_from_release(release): + """Extract 
tools version from release.json mongodbToolsBundle.ubi field.""" + tools_bundle = release["mongodbToolsBundle"]["ubi"] + version_part = tools_bundle.split("-")[-1] # Gets "100.12.2.tgz" + tools_version = version_part.replace(".tgz", "") # Gets "100.12.2" + return tools_version + def generate_agent_build_args(platforms, agent_version, tools_version): """ Generate build arguments for agent image based on platform mappings. @@ -28,16 +50,16 @@ def generate_agent_build_args(platforms, agent_version, tools_version): continue mapping = agent_info["platform_mappings"][platform] - build_mapping = agent_info["build_arg_mappings"][platform] + build_arg_names = get_build_arg_names(platform) # Generate agent build arg agent_filename = f"{agent_info['base_names']['agent']}-{agent_version}.{mapping['agent_suffix']}" - build_args[build_mapping["agent_build_arg"]] = agent_filename + build_args[build_arg_names["agent_build_arg"]] = agent_filename # Generate tools build arg tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version) tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}" - build_args[build_mapping["tools_build_arg"]] = tools_filename + build_args[build_arg_names["tools_build_arg"]] = tools_filename return build_args @@ -193,6 +215,52 @@ def test_dockerfile_contains_expected_args(self): self.assertIn(f"ARG {arg_name}", dockerfile_content, f"Dockerfile should contain 'ARG {arg_name}' declaration") + def test_generate_tools_build_args(self): + """Test generating tools-only build args.""" + platforms = ["linux/amd64", "linux/arm64"] + tools_version = "100.12.0" + + result = generate_tools_build_args(platforms, tools_version) + + expected = { + "mongodb_tools_version_amd64": "mongodb-database-tools-rhel93-x86_64-100.12.0.tgz", + "mongodb_tools_version_arm64": "mongodb-database-tools-rhel93-aarch64-100.12.0.tgz" + } + + self.assertEqual(result, expected) + + def test_extract_tools_version_from_release(self): + """Test extracting tools version from release.json structure.""" + release = { + "mongodbToolsBundle": { + "ubi": "mongodb-database-tools-rhel88-x86_64-100.12.2.tgz" + } + } + + result = extract_tools_version_from_release(release) + self.assertEqual(result, "100.12.2") + + def test_tools_build_args_match_init_dockerfiles(self): + """Test that tools build args match what init-database and init-appdb Dockerfiles expect.""" + platforms = ["linux/amd64", "linux/arm64", "linux/s390x", "linux/ppc64le"] + tools_version = "100.12.0" + + result = generate_tools_build_args(platforms, tools_version) + + # Verify all expected tools build args are present (no agent args) + expected_tools_args = { + "mongodb_tools_version_amd64", "mongodb_tools_version_arm64", + "mongodb_tools_version_s390x", "mongodb_tools_version_ppc64le" + } + + generated_args = set(result.keys()) + self.assertEqual(generated_args, expected_tools_args) + + # Verify no agent args are included + for arg_name in result.keys(): + self.assertIn("tools", arg_name) + self.assertNotIn("agent", arg_name) + if __name__ == "__main__": unittest.main() From 9ed77b416d545f4e64c39eaa59457354192c04a6 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Mon, 11 Aug 2025 17:14:53 +0200 Subject: [PATCH 131/164] for now build on every patch --- .evergreen.yml | 30 +++++++++++++++++++++++++++--- build_info.json | 35 ++++++++++++++++++++++++++++------- 2 files changed, 55 insertions(+), 10 deletions(-) diff --git a/.evergreen.yml b/.evergreen.yml index bd7764cb1..ad692c10c 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ 
-83,7 +83,6 @@ variables: - func: clone - func: setup_building_host_minikube - func: build_multi_cluster_binary - - func: build_test_image_ibm - &setup_group_multi_cluster setup_group_can_fail_task: true @@ -1489,7 +1488,19 @@ buildvariants: - rhel9-power-large allowed_requesters: [ "patch", "github_tag" ] depends_on: - - name: build_test_image + - name: build_operator_ubi + variant: init_test_run + - name: build_init_database_image_ubi + variant: init_test_run + - name: build_database_image_ubi + variant: init_test_run + - name: build_init_appdb_images_ubi + variant: init_test_run + - name: build_init_om_images_ubi + variant: init_test_run + - name: build_agent_images_ubi + variant: init_test_run + - name: build_test_image_ibm variant: init_test_run tasks: - name: e2e_smoke_ibm_task_group @@ -1501,7 +1512,19 @@ buildvariants: - rhel9-zseries-small allowed_requesters: [ "patch", "github_tag" ] depends_on: - - name: build_test_image + - name: build_operator_ubi + variant: init_test_run + - name: build_init_database_image_ubi + variant: init_test_run + - name: build_database_image_ubi + variant: init_test_run + - name: build_init_appdb_images_ubi + variant: init_test_run + - name: build_init_om_images_ubi + variant: init_test_run + - name: build_agent_images_ubi + variant: init_test_run + - name: build_test_image_ibm variant: init_test_run tasks: - name: e2e_smoke_ibm_task_group @@ -1730,6 +1753,7 @@ buildvariants: - name: build_agent_images_ubi - name: build_readiness_probe_image - name: build_upgrade_hook_image + - name: build_test_image_ibm # cross compilation with emulation for ibm machines are expensive - name: prepare_aws - name: init_release_agents_on_ecr diff --git a/build_info.json b/build_info.json index ff1ee8699..1db22b5fc 100644 --- a/build_info.json +++ b/build_info.json @@ -4,7 +4,10 @@ "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes", "platforms": [ - "linux/amd64" + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { @@ -32,7 +35,10 @@ "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-database", "platforms": [ - "linux/amd64" + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { @@ -60,7 +66,10 @@ "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-appdb", "platforms": [ - "linux/amd64" + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { @@ -88,7 +97,10 @@ "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-ops-manager", "platforms": [ - "linux/amd64" + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { @@ -116,7 +128,10 @@ "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-database", "platforms": [ - "linux/amd64" + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { @@ -144,13 +159,19 @@ "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-tests", "platforms": [ - "linux/amd64" + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-tests", "platforms": [ - "linux/amd64" + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] } }, From d90ae2ed57b9d6be62ad6a82e091113390025041 Mon Sep 17 00:00:00 2001 From: Nam 
Nguyen Date: Mon, 11 Aug 2025 17:26:07 +0200 Subject: [PATCH 132/164] fix links --- build_info_agent.json | 6 +++--- scripts/release/atomic_pipeline_test.py | 12 ++++++------ 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/build_info_agent.json b/build_info_agent.json index f12cdda21..a74f96abc 100644 --- a/build_info_agent.json +++ b/build_info_agent.json @@ -2,11 +2,11 @@ "platform_mappings": { "linux/amd64": { "agent_suffix": "linux_x86_64.tar.gz", - "tools_suffix": "rhel93-x86_64-{TOOLS_VERSION}.tgz" + "tools_suffix": "rhel88-x86_64-{TOOLS_VERSION}.tgz" }, "linux/arm64": { - "agent_suffix": "amzn2_aarch64.tar.gz", - "tools_suffix": "rhel93-aarch64-{TOOLS_VERSION}.tgz" + "agent_suffix": "amzn2_aarch64.tar.gz", + "tools_suffix": "rhel88-aarch64-{TOOLS_VERSION}.tgz" }, "linux/s390x": { "agent_suffix": "rhel7_s390x.tar.gz", diff --git a/scripts/release/atomic_pipeline_test.py b/scripts/release/atomic_pipeline_test.py index 776e9ea81..a708cc42f 100644 --- a/scripts/release/atomic_pipeline_test.py +++ b/scripts/release/atomic_pipeline_test.py @@ -108,7 +108,7 @@ def test_generate_agent_build_args_single_platform(self): expected = { "mongodb_agent_version_amd64": "mongodb-mms-automation-agent-108.0.7.8810-1.linux_x86_64.tar.gz", - "mongodb_tools_version_amd64": "mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" + "mongodb_tools_version_amd64": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz" } self.assertEqual(result, expected) @@ -123,9 +123,9 @@ def test_generate_agent_build_args_multiple_platforms(self): expected = { "mongodb_agent_version_amd64": "mongodb-mms-automation-agent-108.0.7.8810-1.linux_x86_64.tar.gz", - "mongodb_tools_version_amd64": "mongodb-database-tools-rhel93-x86_64-100.12.0.tgz", + "mongodb_tools_version_amd64": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz", "mongodb_agent_version_arm64": "mongodb-mms-automation-agent-108.0.7.8810-1.amzn2_aarch64.tar.gz", - "mongodb_tools_version_arm64": "mongodb-database-tools-rhel93-aarch64-100.12.0.tgz", + "mongodb_tools_version_arm64": "mongodb-database-tools-rhel88-aarch64-100.12.0.tgz", "mongodb_agent_version_s390x": "mongodb-mms-automation-agent-108.0.7.8810-1.rhel7_s390x.tar.gz", "mongodb_tools_version_s390x": "mongodb-database-tools-rhel9-s390x-100.12.0.tgz", "mongodb_agent_version_ppc64le": "mongodb-mms-automation-agent-108.0.7.8810-1.rhel8_ppc64le.tar.gz", @@ -146,7 +146,7 @@ def test_generate_agent_build_args_unknown_platform(self, mock_print): # Should only include known platform expected = { "mongodb_agent_version_amd64": "mongodb-mms-automation-agent-108.0.7.8810-1.linux_x86_64.tar.gz", - "mongodb_tools_version_amd64": "mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" + "mongodb_tools_version_amd64": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz" } self.assertEqual(result, expected) @@ -223,8 +223,8 @@ def test_generate_tools_build_args(self): result = generate_tools_build_args(platforms, tools_version) expected = { - "mongodb_tools_version_amd64": "mongodb-database-tools-rhel93-x86_64-100.12.0.tgz", - "mongodb_tools_version_arm64": "mongodb-database-tools-rhel93-aarch64-100.12.0.tgz" + "mongodb_tools_version_amd64": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz", + "mongodb_tools_version_arm64": "mongodb-database-tools-rhel88-aarch64-100.12.0.tgz" } self.assertEqual(result, expected) From dff3ceb82a83b0460827b5e0cf499ddbc35e8e84 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Mon, 11 Aug 2025 19:29:40 +0200 Subject: [PATCH 133/164] fix agent links, make agent conditional --- 
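One practical guard for link fixes like the rhel93-to-rhel88 suffix change above: a wrong distro suffix otherwise only surfaces mid-image-build, so HEAD-checking each constructed download URL fails fast instead. A sketch, assuming the requests library already used by the signing code and the fastdl tools base URL used elsewhere in this pipeline:

import requests

TOOLS_BASE_URL = "https://fastdl.mongodb.org/tools/db"

def check_tools_link(filename: str) -> None:
    # A 200 on HEAD is enough to prove the artifact exists for this suffix.
    url = f"{TOOLS_BASE_URL}/{filename}"
    response = requests.head(url, allow_redirects=True, timeout=30)
    if response.status_code != 200:
        raise RuntimeError(f"Broken tools link ({response.status_code}): {url}")

# Example with the corrected rhel88 suffix (version illustrative):
check_tools_link("mongodb-database-tools-rhel88-x86_64-100.12.0.tgz")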
.evergreen.yml | 17 ++-- docker/mongodb-agent/Dockerfile.atomic | 103 +++++++++++++++++------- scripts/dev/contexts/init_test_run_ibm | 8 ++ scripts/release/atomic_pipeline.py | 6 +- scripts/release/atomic_pipeline_test.py | 50 +++++++++++- scripts/release/build/build_scenario.py | 2 + 6 files changed, 147 insertions(+), 39 deletions(-) create mode 100644 scripts/dev/contexts/init_test_run_ibm diff --git a/.evergreen.yml b/.evergreen.yml index ad692c10c..e2ba0aaa9 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -1501,7 +1501,7 @@ buildvariants: - name: build_agent_images_ubi variant: init_test_run - name: build_test_image_ibm - variant: init_test_run + variant: init_test_run_ibm tasks: - name: e2e_smoke_ibm_task_group @@ -1525,7 +1525,7 @@ buildvariants: - name: build_agent_images_ubi variant: init_test_run - name: build_test_image_ibm - variant: init_test_run + variant: init_test_run_ibm tasks: - name: e2e_smoke_ibm_task_group @@ -1535,9 +1535,7 @@ buildvariants: run_on: - ubuntu2204-arm64-large allowed_requesters: [ "patch", "github_tag" ] - depends_on: - - name: build_test_image - variant: init_test_run + <<: *base_no_om_image_dependency tasks: - name: e2e_smoke_arm_task_group @@ -1753,9 +1751,16 @@ buildvariants: - name: build_agent_images_ubi - name: build_readiness_probe_image - name: build_upgrade_hook_image - - name: build_test_image_ibm # cross compilation with emulation for ibm machines are expensive - name: prepare_aws + - name: init_test_run_ibm + display_name: init_test_run_ibm + max_hosts: -1 + run_on: + - ubuntu2204-small + tasks: + - name: build_test_image_ibm + - name: init_release_agents_on_ecr display_name: init_release_agents_on_ecr # this enables us to run this variant either manually (patch) which pct does or during an OM bump (github_pr) diff --git a/docker/mongodb-agent/Dockerfile.atomic b/docker/mongodb-agent/Dockerfile.atomic index 0fe129815..de9b32121 100644 --- a/docker/mongodb-agent/Dockerfile.atomic +++ b/docker/mongodb-agent/Dockerfile.atomic @@ -1,40 +1,82 @@ -FROM scratch AS tools_downloader +FROM alpine:latest AS tools_downloader ARG mongodb_tools_url -ARG mongodb_tools_version_amd64 -ADD "${mongodb_tools_url}/${mongodb_tools_version_amd64}" /data/amd64/mongodb_tools.tgz +# Set default empty values for all platforms +ARG mongodb_tools_version_amd64="" +ARG mongodb_tools_version_arm64="" +ARG mongodb_tools_version_s390x="" +ARG mongodb_tools_version_ppc64le="" -ARG mongodb_tools_version_arm64 -ADD "${mongodb_tools_url}/${mongodb_tools_version_arm64}" /data/arm64/mongodb_tools.tgz +# Create directories +RUN mkdir -p /data/amd64 /data/arm64 /data/s390x /data/ppc64le -ARG mongodb_tools_version_s390x -ADD "${mongodb_tools_url}/${mongodb_tools_version_s390x}" /data/s390x/mongodb_tools.tgz +# Conditionally download only if the argument is provided +RUN if [ -n "$mongodb_tools_version_amd64" ]; then \ + wget -O /data/amd64/mongodb_tools.tgz "${mongodb_tools_url}/${mongodb_tools_version_amd64}"; \ + fi -ARG mongodb_tools_version_ppc64le -ADD "${mongodb_tools_url}/${mongodb_tools_version_ppc64le}" /data/ppc64le/mongodb_tools.tgz +RUN if [ -n "$mongodb_tools_version_arm64" ]; then \ + wget -O /data/arm64/mongodb_tools.tgz "${mongodb_tools_url}/${mongodb_tools_version_arm64}"; \ + fi -FROM scratch AS agent_downloader +RUN if [ -n "$mongodb_tools_version_s390x" ]; then \ + wget -O /data/s390x/mongodb_tools.tgz "${mongodb_tools_url}/${mongodb_tools_version_s390x}"; \ + fi + +RUN if [ -n "$mongodb_tools_version_ppc64le" ]; then \ + wget -O 
/data/ppc64le/mongodb_tools.tgz "${mongodb_tools_url}/${mongodb_tools_version_ppc64le}"; \ + fi + +FROM alpine:latest AS agent_downloader ARG mongodb_agent_url -ARG mongodb_agent_version_amd64 -ADD "${mongodb_agent_url}/${mongodb_agent_version_amd64}" /data/amd64/mongodb_agent.tgz +# Set default empty values for all platforms +ARG mongodb_agent_version_amd64="" +ARG mongodb_agent_version_arm64="" +ARG mongodb_agent_version_s390x="" +ARG mongodb_agent_version_ppc64le="" + +# Create directories +RUN mkdir -p /data/amd64 /data/arm64 /data/s390x /data/ppc64le + +# Conditionally download only if the argument is provided +RUN if [ -n "$mongodb_agent_version_amd64" ]; then \ + wget -O /data/amd64/mongodb_agent.tgz "${mongodb_agent_url}/${mongodb_agent_version_amd64}"; \ + fi -ARG mongodb_agent_version_arm64 -ADD "${mongodb_agent_url}/${mongodb_agent_version_arm64}" /data/arm64/mongodb_agent.tgz +RUN if [ -n "$mongodb_agent_version_arm64" ]; then \ + wget -O /data/arm64/mongodb_agent.tgz "${mongodb_agent_url}/${mongodb_agent_version_arm64}"; \ + fi -ARG mongodb_agent_version_s390x -ADD "${mongodb_agent_url}/${mongodb_agent_version_s390x}" /data/s390x/mongodb_agent.tgz +RUN if [ -n "$mongodb_agent_version_s390x" ]; then \ + wget -O /data/s390x/mongodb_agent.tgz "${mongodb_agent_url}/${mongodb_agent_version_s390x}"; \ + fi -ARG mongodb_agent_version_ppc64le -ADD "${mongodb_agent_url}/${mongodb_agent_version_ppc64le}" /data/ppc64le/mongodb_agent.tgz +RUN if [ -n "$mongodb_agent_version_ppc64le" ]; then \ + wget -O /data/ppc64le/mongodb_agent.tgz "${mongodb_agent_url}/${mongodb_agent_version_ppc64le}"; \ + fi FROM registry.access.redhat.com/ubi9/ubi-minimal ARG TARGETARCH -COPY --from=tools_downloader "/data/${TARGETARCH}/mongodb_tools.tgz" /tools/mongodb_tools.tgz -COPY --from=agent_downloader "/data/${TARGETARCH}/mongodb_agent.tgz" /agent/mongodb_agent.tgz + +# Create directories first +RUN mkdir -p /tools /agent + +# Copy the entire platform directory and handle missing files gracefully +COPY --from=tools_downloader "/data/" /tmp/tools_data/ +COPY --from=agent_downloader "/data/" /tmp/agent_data/ + +# Move files to the correct location if they exist +RUN if [ -f "/tmp/tools_data/${TARGETARCH}/mongodb_tools.tgz" ]; then \ + mv "/tmp/tools_data/${TARGETARCH}/mongodb_tools.tgz" /tools/mongodb_tools.tgz; \ + fi && \ + if [ -f "/tmp/agent_data/${TARGETARCH}/mongodb_agent.tgz" ]; then \ + mv "/tmp/agent_data/${TARGETARCH}/mongodb_agent.tgz" /agent/mongodb_agent.tgz; \ + fi && \ + rm -rf /tmp/tools_data /tmp/agent_data # Replace libcurl-minimal and curl-minimal with the full versions # https://bugzilla.redhat.com/show_bug.cgi?id=1994521 @@ -71,15 +113,22 @@ COPY ./docker/mongodb-agent/setup-agent-files.sh /opt/scripts/setup-agent-files. 
COPY ./docker/mongodb-agent/dummy-probe.sh /opt/scripts/dummy-probe.sh COPY ./docker/mongodb-agent/dummy-readinessprobe.sh /opt/scripts/dummy-readinessprobe.sh -RUN tar xfz /agent/mongodb_agent.tgz \ - && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ - && chmod +x /agent/mongodb-agent \ +# Extract agent files if they exist +RUN if [ -f "/agent/mongodb_agent.tgz" ]; then \ + tar xfz /agent/mongodb_agent.tgz \ + && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ + && chmod +x /agent/mongodb-agent \ + && rm /agent/mongodb_agent.tgz \ + && rm -r mongodb-mms-automation-agent-*; \ + fi \ && mkdir -p /var/lib/automation/config \ - && chmod -R +r /var/lib/automation/config \ - && rm /agent/mongodb_agent.tgz \ - && rm -r mongodb-mms-automation-agent-* + && chmod -R +r /var/lib/automation/config -RUN tar xfz /tools/mongodb_tools.tgz --directory /var/lib/mongodb-mms-automation/ && rm /tools/mongodb_tools.tgz +# Extract tools files if they exist +RUN if [ -f "/tools/mongodb_tools.tgz" ]; then \ + tar xfz /tools/mongodb_tools.tgz --directory /var/lib/mongodb-mms-automation/ \ + && rm /tools/mongodb_tools.tgz; \ + fi ARG version diff --git a/scripts/dev/contexts/init_test_run_ibm b/scripts/dev/contexts/init_test_run_ibm new file mode 100644 index 000000000..7f7f635e9 --- /dev/null +++ b/scripts/dev/contexts/init_test_run_ibm @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -Eeou pipefail + +script_name=$(readlink -f "${BASH_SOURCE[0]}") +script_dir=$(dirname "${script_name}") + +source "${script_dir}/root-context" diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 8d34d4f40..c29bc46cd 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -342,7 +342,6 @@ def build_om_image(build_configuration: ImageBuildConfiguration): def build_init_appdb_image(build_configuration: ImageBuildConfiguration): release = load_release_file() base_url = "https://fastdl.mongodb.org/tools/db/" - mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) # Extract tools version and generate platform-specific build args tools_version = extract_tools_version_from_release(release) @@ -353,7 +352,7 @@ def build_init_appdb_image(build_configuration: ImageBuildConfiguration): args = { "version": build_configuration.version, - "mongodb_tools_url": mongodb_tools_url_ubi, + "mongodb_tools_url": base_url, # Base URL for platform-specific downloads **platform_build_args # Add the platform-specific build args } @@ -379,7 +378,6 @@ def build_init_database_image(build_configuration: ImageBuildConfiguration): args = { "version": build_configuration.version, - "mongodb_tools_url_ubi": mongodb_tools_url_ubi, "mongodb_tools_url": base_url, # Add the base URL for the Dockerfile **platform_build_args # Add the platform-specific build args } @@ -553,6 +551,8 @@ def build_agent_pipeline( args = { "version": agent_version, "agent_version": agent_version, + "mongodb_agent_url": "https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod", # TODO: migrate to build info, + "mongodb_tools_url": "https://fastdl.mongodb.org/tools/db", # TODO: migrate to build info, **platform_build_args # Add the platform-specific build args } diff --git a/scripts/release/atomic_pipeline_test.py b/scripts/release/atomic_pipeline_test.py index a708cc42f..d495ec283 100644 --- a/scripts/release/atomic_pipeline_test.py +++ 
b/scripts/release/atomic_pipeline_test.py @@ -7,9 +7,6 @@ import unittest from unittest.mock import patch -from scripts.release.atomic_pipeline import generate_tools_build_args - - # Local implementations to avoid import issues @@ -35,6 +32,28 @@ def extract_tools_version_from_release(release): tools_version = version_part.replace(".tgz", "") # Gets "100.12.2" return tools_version + +def generate_tools_build_args(platforms, tools_version): + """Generate build arguments for MongoDB tools based on platform mappings.""" + agent_info = load_agent_build_info() + build_args = {} + + for platform in platforms: + if platform not in agent_info["platform_mappings"]: + print(f"Platform {platform} not found in agent mappings, skipping") + continue + + mapping = agent_info["platform_mappings"][platform] + build_arg_names = get_build_arg_names(platform) + + # Generate tools build arg only + tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version) + tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}" + build_args[build_arg_names["tools_build_arg"]] = tools_filename + + return build_args + + def generate_agent_build_args(platforms, agent_version, tools_version): """ Generate build arguments for agent image based on platform mappings. @@ -261,6 +280,31 @@ def test_tools_build_args_match_init_dockerfiles(self): self.assertIn("tools", arg_name) self.assertNotIn("agent", arg_name) + def test_url_construction_correctness(self): + """Test that URLs are constructed correctly with proper trailing slashes.""" + # Test agent build args URL construction + platforms = ["linux/amd64"] + agent_version = "108.0.12.8846-1" + tools_version = "100.12.2" + + result = generate_agent_build_args(platforms, agent_version, tools_version) + + agent_base_url = "https://fastdl.mongodb.org/tools/mms-automation/" + tools_base_url = "https://fastdl.mongodb.org/tools/db/" + + agent_filename = result["mongodb_agent_version_amd64"] + tools_filename = result["mongodb_tools_version_amd64"] + + agent_url = f"{agent_base_url}{agent_filename}" + tools_url = f"{tools_base_url}{tools_filename}" + + expected_agent_url = "https://fastdl.mongodb.org/tools/mms-automation/mongodb-mms-automation-agent-108.0.12.8846-1.linux_x86_64.tar.gz" + expected_tools_url = "https://fastdl.mongodb.org/tools/db/mongodb-database-tools-rhel88-x86_64-100.12.2.tgz" + + self.assertEqual(agent_url, expected_agent_url) + self.assertEqual(tools_url, expected_tools_url) + + if __name__ == "__main__": unittest.main() diff --git a/scripts/release/build/build_scenario.py b/scripts/release/build/build_scenario.py index 3d83288b7..4650b581d 100644 --- a/scripts/release/build/build_scenario.py +++ b/scripts/release/build/build_scenario.py @@ -54,5 +54,7 @@ def get_version(self, repository_path: str, changelog_sub_path: str, initial_com return repo.head.object.hexsha[:COMMIT_SHA_LENGTH] case BuildScenario.RELEASE: return calculate_next_version(repo, changelog_sub_path, initial_commit_sha, initial_version) + case BuildScenario.DEVELOPMENT: + return "test" raise ValueError(f"Unknown build scenario: {self}") From 66d2eb4b9c05875a96631373fb1b307957733632 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Tue, 12 Aug 2025 11:54:54 +0200 Subject: [PATCH 134/164] make agent targetplatform and .evergreen power and z support and fix for init images --- .evergreen.yml | 22 ++- docker/mongodb-agent/Dockerfile.atomic | 125 +++++++----------- .../Dockerfile.atomic | 30 +++-- scripts/release/atomic_pipeline.py | 12 +- 
scripts/release/atomic_pipeline_test.py | 16 ++- 5 files changed, 105 insertions(+), 100 deletions(-) diff --git a/.evergreen.yml b/.evergreen.yml index e2ba0aaa9..c6c906698 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -1485,6 +1485,7 @@ buildvariants: display_name: e2e_smoke_ibm_power tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] run_on: + - rhel9-power-small - rhel9-power-large allowed_requesters: [ "patch", "github_tag" ] depends_on: @@ -1501,7 +1502,7 @@ buildvariants: - name: build_agent_images_ubi variant: init_test_run - name: build_test_image_ibm - variant: init_test_run_ibm + variant: init_test_run_ibm_power tasks: - name: e2e_smoke_ibm_task_group @@ -1510,6 +1511,7 @@ buildvariants: tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] run_on: - rhel9-zseries-small + - rhel9-zseries-large allowed_requesters: [ "patch", "github_tag" ] depends_on: - name: build_operator_ubi @@ -1525,7 +1527,7 @@ buildvariants: - name: build_agent_images_ubi variant: init_test_run - name: build_test_image_ibm - variant: init_test_run_ibm + variant: init_test_run_ibm_z tasks: - name: e2e_smoke_ibm_task_group @@ -1753,11 +1755,21 @@ buildvariants: - name: build_upgrade_hook_image - name: prepare_aws - - name: init_test_run_ibm - display_name: init_test_run_ibm + - name: init_test_run_ibm_power + display_name: init_test_run_ibm_power max_hosts: -1 run_on: - - ubuntu2204-small + - rhel9-power-small + - rhel9-power-large + tasks: + - name: build_test_image_ibm + + - name: init_test_run_ibm_z + display_name: init_test_run_ibm_z + max_hosts: -1 + run_on: + - rhel9-zseries-small + - rhel9-zseries-large tasks: - name: build_test_image_ibm diff --git a/docker/mongodb-agent/Dockerfile.atomic b/docker/mongodb-agent/Dockerfile.atomic index de9b32121..3ca58b7ea 100644 --- a/docker/mongodb-agent/Dockerfile.atomic +++ b/docker/mongodb-agent/Dockerfile.atomic @@ -1,62 +1,53 @@ -FROM alpine:latest AS tools_downloader +FROM registry.access.redhat.com/ubi8/ubi-minimal AS tools_downloader ARG mongodb_tools_url +ARG mongodb_tools_version_s390x +ARG mongodb_tools_version_ppc64le +ARG mongodb_tools_version_amd64 +ARG mongodb_tools_version_arm64 -# Set default empty values for all platforms -ARG mongodb_tools_version_amd64="" -ARG mongodb_tools_version_arm64="" -ARG mongodb_tools_version_s390x="" -ARG mongodb_tools_version_ppc64le="" +RUN microdnf -y update --nodocs \ + && microdnf -y install --nodocs tar gzip curl \ + && microdnf clean all -# Create directories -RUN mkdir -p /data/amd64 /data/arm64 /data/s390x /data/ppc64le +RUN case ${TARGETPLATFORM} in \ + "linux/amd64") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_amd64} ;; \ + "linux/arm64") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_arm64} ;; \ + "linux/s390x") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_s390x} ;; \ + "linux/ppc64le") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_ppc64le} ;; \ + esac \ + && mkdir -p /tools \ + && curl -o /tools/mongodb_tools.tgz "${mongodb_tools_url}/${MONGODB_TOOLS_VERSION}" -# Conditionally download only if the argument is provided -RUN if [ -n "$mongodb_tools_version_amd64" ]; then \ - wget -O /data/amd64/mongodb_tools.tgz "${mongodb_tools_url}/${mongodb_tools_version_amd64}"; \ - fi +RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ + && rm /tools/mongodb_tools.tgz -RUN if [ -n "$mongodb_tools_version_arm64" ]; then \ - wget -O /data/arm64/mongodb_tools.tgz "${mongodb_tools_url}/${mongodb_tools_version_arm64}"; \ - fi - -RUN if [ -n "$mongodb_tools_version_s390x" 
]; then \ - wget -O /data/s390x/mongodb_tools.tgz "${mongodb_tools_url}/${mongodb_tools_version_s390x}"; \ - fi - -RUN if [ -n "$mongodb_tools_version_ppc64le" ]; then \ - wget -O /data/ppc64le/mongodb_tools.tgz "${mongodb_tools_url}/${mongodb_tools_version_ppc64le}"; \ - fi - -FROM alpine:latest AS agent_downloader +FROM registry.access.redhat.com/ubi8/ubi-minimal AS agent_downloader ARG mongodb_agent_url - -# Set default empty values for all platforms -ARG mongodb_agent_version_amd64="" -ARG mongodb_agent_version_arm64="" -ARG mongodb_agent_version_s390x="" -ARG mongodb_agent_version_ppc64le="" - -# Create directories -RUN mkdir -p /data/amd64 /data/arm64 /data/s390x /data/ppc64le - -# Conditionally download only if the argument is provided -RUN if [ -n "$mongodb_agent_version_amd64" ]; then \ - wget -O /data/amd64/mongodb_agent.tgz "${mongodb_agent_url}/${mongodb_agent_version_amd64}"; \ - fi - -RUN if [ -n "$mongodb_agent_version_arm64" ]; then \ - wget -O /data/arm64/mongodb_agent.tgz "${mongodb_agent_url}/${mongodb_agent_version_arm64}"; \ - fi - -RUN if [ -n "$mongodb_agent_version_s390x" ]; then \ - wget -O /data/s390x/mongodb_agent.tgz "${mongodb_agent_url}/${mongodb_agent_version_s390x}"; \ - fi - -RUN if [ -n "$mongodb_agent_version_ppc64le" ]; then \ - wget -O /data/ppc64le/mongodb_agent.tgz "${mongodb_agent_url}/${mongodb_agent_version_ppc64le}"; \ - fi +ARG mongodb_agent_version_s390x +ARG mongodb_agent_version_ppc64le +ARG mongodb_agent_version_amd64 +ARG mongodb_agent_version_arm64 + +RUN microdnf -y update --nodocs \ + && microdnf -y install --nodocs tar gzip curl \ + && microdnf clean all + +RUN case ${TARGETPLATFORM} in \ + "linux/amd64") export MONGODB_AGENT_VERSION=${mongodb_agent_version_amd64} ;; \ + "linux/arm64") export MONGODB_AGENT_VERSION=${mongodb_agent_version_arm64} ;; \ + "linux/s390x") export MONGODB_AGENT_VERSION=${mongodb_agent_version_s390x} ;; \ + "linux/ppc64le") export MONGODB_AGENT_VERSION=${mongodb_agent_version_ppc64le} ;; \ + esac \ + && mkdir -p /agent \ + && curl -o /agent/mongodb_agent.tgz "${mongodb_agent_url}/${MONGODB_AGENT_VERSION}" + +RUN tar xfz /agent/mongodb_agent.tgz \ + && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ + && chmod +x /agent/mongodb-agent \ + && rm /agent/mongodb_agent.tgz \ + && rm -r mongodb-mms-automation-agent-* FROM registry.access.redhat.com/ubi9/ubi-minimal @@ -65,18 +56,9 @@ ARG TARGETARCH # Create directories first RUN mkdir -p /tools /agent -# Copy the entire platform directory and handle missing files gracefully -COPY --from=tools_downloader "/data/" /tmp/tools_data/ -COPY --from=agent_downloader "/data/" /tmp/agent_data/ - -# Move files to the correct location if they exist -RUN if [ -f "/tmp/tools_data/${TARGETARCH}/mongodb_tools.tgz" ]; then \ - mv "/tmp/tools_data/${TARGETARCH}/mongodb_tools.tgz" /tools/mongodb_tools.tgz; \ - fi && \ - if [ -f "/tmp/agent_data/${TARGETARCH}/mongodb_agent.tgz" ]; then \ - mv "/tmp/agent_data/${TARGETARCH}/mongodb_agent.tgz" /agent/mongodb_agent.tgz; \ - fi && \ - rm -rf /tmp/tools_data /tmp/agent_data +# Copy the extracted tools and agent from the downloader stages +COPY --from=tools_downloader "/tools/" /tools/ +COPY --from=agent_downloader "/agent/" /agent/ # Replace libcurl-minimal and curl-minimal with the full versions # https://bugzilla.redhat.com/show_bug.cgi?id=1994521 @@ -113,23 +95,10 @@ COPY ./docker/mongodb-agent/setup-agent-files.sh /opt/scripts/setup-agent-files. 
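# Note on ${TARGETPLATFORM}: BuildKit defines it globally but only injects it
# into a stage that redeclares it, so each downloader stage needs its own
# "ARG TARGETPLATFORM" (added later in this series). A minimal sketch of the
# selection pattern, with hypothetical per-arch filename args:
#
#   FROM registry.access.redhat.com/ubi8/ubi-minimal AS downloader
#   ARG TARGETPLATFORM          # must be redeclared inside the stage
#   ARG artifact_amd64
#   ARG artifact_arm64
#   RUN case "${TARGETPLATFORM}" in \
#         "linux/amd64") echo "fetch ${artifact_amd64}" ;; \
#         "linux/arm64") echo "fetch ${artifact_arm64}" ;; \
#         *) echo "unsupported platform: ${TARGETPLATFORM}" >&2 && exit 1 ;; \
#       esac
#
# A default branch like the one above would also surface unsupported platforms
# instead of silently leaving the selected version variable empty.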
COPY ./docker/mongodb-agent/dummy-probe.sh /opt/scripts/dummy-probe.sh COPY ./docker/mongodb-agent/dummy-readinessprobe.sh /opt/scripts/dummy-readinessprobe.sh -# Extract agent files if they exist -RUN if [ -f "/agent/mongodb_agent.tgz" ]; then \ - tar xfz /agent/mongodb_agent.tgz \ - && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ - && chmod +x /agent/mongodb-agent \ - && rm /agent/mongodb_agent.tgz \ - && rm -r mongodb-mms-automation-agent-*; \ - fi \ - && mkdir -p /var/lib/automation/config \ +# Set up directories and permissions (agent and tools are already extracted in downloader stages) +RUN mkdir -p /var/lib/automation/config \ && chmod -R +r /var/lib/automation/config -# Extract tools files if they exist -RUN if [ -f "/tools/mongodb_tools.tgz" ]; then \ - tar xfz /tools/mongodb_tools.tgz --directory /var/lib/mongodb-mms-automation/ \ - && rm /tools/mongodb_tools.tgz; \ - fi - ARG version LABEL name="MongoDB Agent" \ diff --git a/docker/mongodb-kubernetes-init-database/Dockerfile.atomic b/docker/mongodb-kubernetes-init-database/Dockerfile.atomic index 3f38f7870..e363ac281 100644 --- a/docker/mongodb-kubernetes-init-database/Dockerfile.atomic +++ b/docker/mongodb-kubernetes-init-database/Dockerfile.atomic @@ -1,18 +1,30 @@ -FROM scratch AS tools_downloader +FROM registry.access.redhat.com/ubi8/ubi-minimal AS tools_downloader ARG mongodb_tools_url - +ARG mongodb_tools_version_s390x +ARG mongodb_tools_version_ppc64le ARG mongodb_tools_version_amd64 -ADD "${mongodb_tools_url}/${mongodb_tools_version_amd64}" /data/amd64/mongodb_tools.tgz - ARG mongodb_tools_version_arm64 -ADD "${mongodb_tools_url}/${mongodb_tools_version_arm64}" /data/arm64/mongodb_tools.tgz -ARG mongodb_tools_version_s390x -ADD "${mongodb_tools_url}/${mongodb_tools_version_s390x}" /data/s390x/mongodb_tools.tgz +RUN microdnf -y update --nodocs \ + && microdnf -y install --nodocs tar gzip curl \ + && microdnf clean all -ARG mongodb_tools_version_ppc64le -ADD "${mongodb_tools_url}/${mongodb_tools_version_ppc64le}" /data/ppc64le/mongodb_tools.tgz +RUN case ${TARGETPLATFORM} in \ + "linux/amd64") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_amd64} ;; \ + "linux/arm64") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_arm64} ;; \ + "linux/s390x") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_s390x} ;; \ + "linux/ppc64le") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_ppc64le} ;; \ + esac \ + && mkdir -p /tools \ + && if [ -n "$MONGODB_TOOLS_VERSION" ]; then \ + curl -o /tools/mongodb_tools.tgz "${mongodb_tools_url}/${MONGODB_TOOLS_VERSION}"; \ + fi + +RUN if [ -f "/tools/mongodb_tools.tgz" ]; then \ + tar xfz /tools/mongodb_tools.tgz --directory /tools \ + && rm /tools/mongodb_tools.tgz; \ + fi FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index c29bc46cd..237382417 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -341,7 +341,7 @@ def build_om_image(build_configuration: ImageBuildConfiguration): def build_init_appdb_image(build_configuration: ImageBuildConfiguration): release = load_release_file() - base_url = "https://fastdl.mongodb.org/tools/db/" + base_url = "https://fastdl.mongodb.org/tools/db" # Extract tools version and generate platform-specific build args tools_version = extract_tools_version_from_release(release) @@ -366,7 +366,7 @@ def 
build_init_appdb_image(build_configuration: ImageBuildConfiguration): # TODO: nam static: remove this once static containers becomes the default def build_init_database_image(build_configuration: ImageBuildConfiguration): release = load_release_file() - base_url = "https://fastdl.mongodb.org/tools/db/" + base_url = "https://fastdl.mongodb.org/tools/db" mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) # Extract tools version and generate platform-specific build args @@ -548,11 +548,15 @@ def build_agent_pipeline( tools_version=tools_version ) + # Use centralized base URLs (without trailing slashes to avoid double slashes in Dockerfile) + agent_base_url = "https://fastdl.mongodb.org/tools/mms-automation" + tools_base_url = "https://fastdl.mongodb.org/tools/db" + args = { "version": agent_version, "agent_version": agent_version, - "mongodb_agent_url": "https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod", # TODO: migrate to build info, - "mongodb_tools_url": "https://fastdl.mongodb.org/tools/db", # TODO: migrate to build info, + "mongodb_agent_url": agent_base_url, + "mongodb_tools_url": tools_base_url, **platform_build_args # Add the platform-specific build args } diff --git a/scripts/release/atomic_pipeline_test.py b/scripts/release/atomic_pipeline_test.py index d495ec283..90d186bdf 100644 --- a/scripts/release/atomic_pipeline_test.py +++ b/scripts/release/atomic_pipeline_test.py @@ -289,14 +289,15 @@ def test_url_construction_correctness(self): result = generate_agent_build_args(platforms, agent_version, tools_version) - agent_base_url = "https://fastdl.mongodb.org/tools/mms-automation/" - tools_base_url = "https://fastdl.mongodb.org/tools/db/" + agent_base_url = "https://fastdl.mongodb.org/tools/mms-automation" + tools_base_url = "https://fastdl.mongodb.org/tools/db" agent_filename = result["mongodb_agent_version_amd64"] tools_filename = result["mongodb_tools_version_amd64"] - agent_url = f"{agent_base_url}{agent_filename}" - tools_url = f"{tools_base_url}{tools_filename}" + # Test URL construction (what happens in Dockerfile: ${base_url}/${filename}) + agent_url = f"{agent_base_url}/{agent_filename}" + tools_url = f"{tools_base_url}/{tools_filename}" expected_agent_url = "https://fastdl.mongodb.org/tools/mms-automation/mongodb-mms-automation-agent-108.0.12.8846-1.linux_x86_64.tar.gz" expected_tools_url = "https://fastdl.mongodb.org/tools/db/mongodb-database-tools-rhel88-x86_64-100.12.2.tgz" @@ -304,6 +305,13 @@ def test_url_construction_correctness(self): self.assertEqual(agent_url, expected_agent_url) self.assertEqual(tools_url, expected_tools_url) + # Verify no double slashes (common mistake) + self.assertNotIn("//", agent_url.replace("https://", "")) + self.assertNotIn("//", tools_url.replace("https://", "")) + + # Verify base URLs do NOT end with slash (to avoid double slashes in Dockerfile) + self.assertFalse(agent_base_url.endswith("/")) + self.assertFalse(tools_base_url.endswith("/")) if __name__ == "__main__": From 5d0000251fa60b90872e86cf5e7da0d4ace608f8 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Tue, 12 Aug 2025 13:08:37 +0200 Subject: [PATCH 135/164] fix agent and init dbs --- docker/mongodb-kubernetes-init-database/Dockerfile.atomic | 7 +++---- scripts/release/atomic_pipeline.py | 3 +-- scripts/release/atomic_pipeline_test.py | 4 ++-- 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/docker/mongodb-kubernetes-init-database/Dockerfile.atomic 
b/docker/mongodb-kubernetes-init-database/Dockerfile.atomic index e363ac281..4a27f93ca 100644 --- a/docker/mongodb-kubernetes-init-database/Dockerfile.atomic +++ b/docker/mongodb-kubernetes-init-database/Dockerfile.atomic @@ -57,15 +57,14 @@ COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/licenses/ FROM registry.access.redhat.com/ubi8/ubi-minimal ARG TARGETARCH -COPY --from=tools_downloader /data/${TARGETARCH}/mongodb_tools.tgz /tools/mongodb_tools.tgz + +# Copy the extracted tools from the downloader stage (tools are already extracted there) +COPY --from=tools_downloader /tools/ /tools/ RUN microdnf -y update --nodocs \ && microdnf -y install --nodocs tar gzip \ && microdnf clean all -RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ - && rm /tools/mongodb_tools.tgz - COPY --from=base /data/readinessprobe /probes/readinessprobe COPY --from=base /data/probe.sh /probes/probe.sh COPY --from=base /data/scripts/ /scripts/ diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 237382417..0ad5b9690 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -548,8 +548,7 @@ def build_agent_pipeline( tools_version=tools_version ) - # Use centralized base URLs (without trailing slashes to avoid double slashes in Dockerfile) - agent_base_url = "https://fastdl.mongodb.org/tools/mms-automation" + agent_base_url = "https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" tools_base_url = "https://fastdl.mongodb.org/tools/db" args = { diff --git a/scripts/release/atomic_pipeline_test.py b/scripts/release/atomic_pipeline_test.py index 90d186bdf..6f19b9acd 100644 --- a/scripts/release/atomic_pipeline_test.py +++ b/scripts/release/atomic_pipeline_test.py @@ -289,7 +289,7 @@ def test_url_construction_correctness(self): result = generate_agent_build_args(platforms, agent_version, tools_version) - agent_base_url = "https://fastdl.mongodb.org/tools/mms-automation" + agent_base_url = "https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" tools_base_url = "https://fastdl.mongodb.org/tools/db" agent_filename = result["mongodb_agent_version_amd64"] @@ -299,7 +299,7 @@ def test_url_construction_correctness(self): agent_url = f"{agent_base_url}/{agent_filename}" tools_url = f"{tools_base_url}/{tools_filename}" - expected_agent_url = "https://fastdl.mongodb.org/tools/mms-automation/mongodb-mms-automation-agent-108.0.12.8846-1.linux_x86_64.tar.gz" + expected_agent_url = "https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-108.0.12.8846-1.linux_x86_64.tar.gz" expected_tools_url = "https://fastdl.mongodb.org/tools/db/mongodb-database-tools-rhel88-x86_64-100.12.2.tgz" self.assertEqual(agent_url, expected_agent_url) From 0c2ab1de76519fc7d9b67e184189219cafb54326 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Tue, 12 Aug 2025 16:15:47 +0200 Subject: [PATCH 136/164] try minikube setup --- .evergreen.yml | 2 +- scripts/evergreen/e2e/build_e2e_image_ibm.sh | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.evergreen.yml b/.evergreen.yml index c6c906698..4531220ac 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -436,7 +436,7 @@ tasks: - name: build_test_image_ibm commands: - func: clone - - func: setup_building_host + - func: setup_building_host_minikube - func: build_multi_cluster_binary - func: build_test_image_ibm diff --git 
a/scripts/evergreen/e2e/build_e2e_image_ibm.sh b/scripts/evergreen/e2e/build_e2e_image_ibm.sh index 66aad76c3..8d5e8ec3c 100755 --- a/scripts/evergreen/e2e/build_e2e_image_ibm.sh +++ b/scripts/evergreen/e2e/build_e2e_image_ibm.sh @@ -8,7 +8,9 @@ cp -rf helm_chart docker/mongodb-kubernetes-tests/helm_chart echo "Building mongodb-kubernetes-tests image with tag: ${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}" cd docker/mongodb-kubernetes-tests -#sudo podman buildx build --progress plain . -f Dockerfile -t "${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}-$(arch)" --build-arg PYTHON_VERSION="${PYTHON_VERSION}" -#sudo podman push --authfile="/root/.config/containers/auth.json" "${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}-$(arch)" +docker buildx build --progress plain . -f Dockerfile -t "${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}-$(arch)" --build-arg PYTHON_VERSION="${PYTHON_VERSION}" +docker push "${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}-$(arch)" # docker buildx imagetools create "${BASE_REPO_URL}mongodb-kubernetes-tests:${version_id}" --append "${BASE_REPO_URL}mongodb-kubernetes-tests:${version_id}-$(arch)" -t "${BASE_REPO_URL}mongodb-kubernetes-tests:${version_id}" From 2b0e01993f4129a68d1b3103a36cec7cf84fff47 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Tue, 12 Aug 2025 16:44:32 +0200 Subject: [PATCH 137/164] fix rhel path --- build_info_agent.json | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/build_info_agent.json b/build_info_agent.json index a74f96abc..634a316c6 100644 --- a/build_info_agent.json +++ b/build_info_agent.json @@ -2,11 +2,11 @@ "platform_mappings": { "linux/amd64": { "agent_suffix": "linux_x86_64.tar.gz", - "tools_suffix": "rhel88-x86_64-{TOOLS_VERSION}.tgz" + "tools_suffix": "rhel93-x86_64-{TOOLS_VERSION}.tgz" }, "linux/arm64": { "agent_suffix": "amzn2_aarch64.tar.gz", - "tools_suffix": "rhel88-aarch64-{TOOLS_VERSION}.tgz" + "tools_suffix": "rhel93-aarch64-{TOOLS_VERSION}.tgz" }, "linux/s390x": { "agent_suffix": "rhel7_s390x.tar.gz", From 009bea535531858a49ba4366ae31d21e09a9af48 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Wed, 13 Aug 2025 11:13:55 +0200 Subject: [PATCH 138/164] linter, reordering minikube, fix agents --- .evergreen-functions.yml | 6 +- .evergreen.yml | 5 +- docker/mongodb-agent/Dockerfile.atomic | 2 + scripts/dev/configure_container_auth.sh | 18 ++--- scripts/evergreen/e2e/build_e2e_image_ibm.sh | 8 +- scripts/evergreen/setup_kind.sh | 17 +++-- scripts/evergreen/setup_minikube_host.sh | 14 +--- scripts/release/atomic_pipeline.py | 34 +++------ scripts/release/atomic_pipeline_test.py | 77 +++++++++++--------- 9 files changed, 89 insertions(+), 92 deletions(-) diff --git a/.evergreen-functions.yml b/.evergreen-functions.yml index ed03ee798..df7a1cfe5 100644 --- a/.evergreen-functions.yml +++ b/.evergreen-functions.yml @@ -262,13 +262,13 @@ functions: # Configures docker authentication to ECR and RH registries.
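# Two ordering assumptions are worth making explicit for the reordered steps
# below: the Python venv must exist before setup_aws (on IBM hosts the AWS CLI
# is installed via pip rather than as a system package), and
# configure_docker_auth runs last because it needs both AWS credentials and a
# configured container runtime. A hypothetical smoke check for the resulting
# registry auth:
#
#   aws ecr get-login-password --region us-east-1 | \
#     podman login --username AWS --password-stdin 268558157000.dkr.ecr.us-east-1.amazonaws.com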
setup_building_host: - *switch_context + - *python_venv - *setup_aws - - *configure_docker_auth - *setup_evg_host - - *python_venv + - *configure_docker_auth # This differs for normal evg_host as we require minikube instead of kind for - # IBM machines and install aws cli via pip instead + # IBM machines also install aws cli via pip instead and use podman setup_building_host_minikube: - *switch_context - command: subprocess.exec diff --git a/.evergreen.yml b/.evergreen.yml index 4531220ac..e0f9db349 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -432,11 +432,12 @@ tasks: vars: image_name: meko-tests - - name: build_test_image_ibm commands: - func: clone - - func: setup_building_host_minikube + - func: python_venv + - func: setup_aws + - func: download_kube_tools - func: build_multi_cluster_binary - func: build_test_image_ibm diff --git a/docker/mongodb-agent/Dockerfile.atomic b/docker/mongodb-agent/Dockerfile.atomic index 3ca58b7ea..5160285ce 100644 --- a/docker/mongodb-agent/Dockerfile.atomic +++ b/docker/mongodb-agent/Dockerfile.atomic @@ -1,5 +1,6 @@ FROM registry.access.redhat.com/ubi8/ubi-minimal AS tools_downloader +ARG TARGETPLATFORM ARG mongodb_tools_url ARG mongodb_tools_version_s390x ARG mongodb_tools_version_ppc64le @@ -24,6 +25,7 @@ RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ FROM registry.access.redhat.com/ubi8/ubi-minimal AS agent_downloader +ARG TARGETPLATFORM ARG mongodb_agent_url ARG mongodb_agent_version_s390x ARG mongodb_agent_version_ppc64le diff --git a/scripts/dev/configure_container_auth.sh b/scripts/dev/configure_container_auth.sh index 9464ba409..6ac1239a3 100755 --- a/scripts/dev/configure_container_auth.sh +++ b/scripts/dev/configure_container_auth.sh @@ -36,7 +36,7 @@ setup_validate_container_runtime() { ;; esac - if [[ "$USE_SUDO" == "true" ]]; then + if [[ "${USE_SUDO}" == "true" ]]; then sudo mkdir -p "$(dirname "${CONFIG_PATH}")" else mkdir -p "$(dirname "${CONFIG_PATH}")" @@ -45,8 +45,8 @@ setup_validate_container_runtime() { # Wrapper function to execute commands with or without sudo exec_cmd() { - if [[ "$USE_SUDO" == "true" ]]; then - sudo env PATH="$PATH" "$@" + if [[ "${USE_SUDO}" == "true" ]]; then + sudo env PATH="${PATH}" "$@" else "$@" fi @@ -55,10 +55,10 @@ exec_cmd() { # Wrapper function to read files with or without sudo read_file() { local file="$1" - if [[ "$USE_SUDO" == "true" ]]; then - sudo cat "$file" + if [[ "${USE_SUDO}" == "true" ]]; then + sudo cat "${file}" else - cat "$file" + cat "${file}" fi } @@ -66,10 +66,10 @@ read_file() { write_file() { local content="$1" local file="$2" - if [[ "$USE_SUDO" == "true" ]]; then - echo "$content" | sudo tee "$file" > /dev/null + if [[ "${USE_SUDO}" == "true" ]]; then + echo "${content}" | sudo tee "${file}" > /dev/null else - echo "$content" > "$file" + echo "${content}" > "${file}" fi } diff --git a/scripts/evergreen/e2e/build_e2e_image_ibm.sh b/scripts/evergreen/e2e/build_e2e_image_ibm.sh index 8d5e8ec3c..637c83357 100755 --- a/scripts/evergreen/e2e/build_e2e_image_ibm.sh +++ b/scripts/evergreen/e2e/build_e2e_image_ibm.sh @@ -1,6 +1,8 @@ #!/usr/bin/env bash source scripts/dev/set_env_context.sh +# we need to use podman here and a special script as ibm machines don't have docker + cp -rf public docker/mongodb-kubernetes-tests/public cp release.json docker/mongodb-kubernetes-tests/release.json cp requirements.txt docker/mongodb-kubernetes-tests/requirements.txt @@ -8,9 +10,7 @@ cp -rf helm_chart docker/mongodb-kubernetes-tests/helm_chart echo "Building mongodb-kubernetes-tests 
image with tag: ${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}" cd docker/mongodb-kubernetes-tests -#sudo podman buildx build --progress plain . -f Dockerfile -t "${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}-$(arch)" --build-arg PYTHON_VERSION="${PYTHON_VERSION}" -#sudo podman push --authfile="/root/.config/containers/auth.json" "${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}-$(arch)" -docker buildx build --progress plain . -f Dockerfile -t "${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}-$(arch)" --build-arg PYTHON_VERSION="${PYTHON_VERSION}" -docker push "${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}-$(arch)" +sudo podman buildx build --progress plain . -f Dockerfile -t "${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}-$(arch)" --build-arg PYTHON_VERSION="${PYTHON_VERSION}" +sudo podman push --authfile="/root/.config/containers/auth.json" "${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}-$(arch)" # docker buildx imagetools create "${BASE_REPO_URL}mongodb-kubernetes-tests:${version_id}" --append "${BASE_REPO_URL}mongodb-kubernetes-tests:${version_id}-$(arch)" -t "${BASE_REPO_URL}mongodb-kubernetes-tests:${version_id}" diff --git a/scripts/evergreen/setup_kind.sh b/scripts/evergreen/setup_kind.sh index 3df0aa620..855e50530 100755 --- a/scripts/evergreen/setup_kind.sh +++ b/scripts/evergreen/setup_kind.sh @@ -11,10 +11,15 @@ arch_suffix=$(detect_architecture) # This should be changed when needed latest_version="v0.27.0" -mkdir -p "${PROJECT_DIR}/bin/" -echo "Saving kind to ${PROJECT_DIR}/bin" -curl --retry 3 --silent -L "https://github.com/kubernetes-sigs/kind/releases/download/${latest_version}/kind-${os}-${arch_suffix}" -o kind +# Only proceed with installation if architecture is supported (amd64 or arm64) +if [[ "$arch_suffix" == "amd64" || "$arch_suffix" == "arm64" ]]; then + mkdir -p "${PROJECT_DIR}/bin/" + echo "Saving kind to ${PROJECT_DIR}/bin" + curl --retry 3 --silent -L "https://github.com/kubernetes-sigs/kind/releases/download/${latest_version}/kind-${os}-${arch_suffix}" -o kind -chmod +x kind -sudo mv kind "${PROJECT_DIR}/bin" -echo "Installed kind in ${PROJECT_DIR}/bin" + chmod +x kind + sudo mv kind "${PROJECT_DIR}/bin" + echo "Installed kind in ${PROJECT_DIR}/bin" +else + echo "Architecture ${arch_suffix} not supported for kind installation, skipping" +fi diff --git a/scripts/evergreen/setup_minikube_host.sh b/scripts/evergreen/setup_minikube_host.sh index 1c7d6d3a8..db889d412 100755 --- a/scripts/evergreen/setup_minikube_host.sh +++ b/scripts/evergreen/setup_minikube_host.sh @@ -6,7 +6,7 @@ source scripts/dev/set_env_context.sh source scripts/funcs/install -set -Eeoux pipefail +set -Eeou pipefail echo "==========================================" echo "Setting up minikube host with multi-architecture support" @@ -39,7 +39,6 @@ run_setup_step() { } # Setup Python environment (needed for AWS CLI pip installation) -export GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=1 export SKIP_INSTALL_REQUIREMENTS=true run_setup_step "Python Virtual Environment" "scripts/dev/recreate_python_venv.sh" @@ -54,14 +53,9 @@ run_setup_step "Minikube Host Setup with Container Runtime Detection" "scripts/m export CONTAINER_RUNTIME=podman run_setup_step "Container Registry Authentication" "scripts/dev/configure_container_auth.sh" -# The minikube cluster is already started by the setup_minikube_host.sh script -echo "" -echo ">>> Minikube cluster startup completed by setup_minikube_host.sh" -echo "✅ Minikube cluster is ready for use" - echo "" echo 
"==========================================" -echo "✅ Minikube host setup completed successfully!" +echo "✅ host setup completed successfully!" echo "==========================================" echo "" echo "Installed tools summary:" @@ -70,7 +64,5 @@ echo "- AWS CLI: $(aws --version 2>/dev/null || echo 'Not found')" echo "- kubectl: $(kubectl version --client 2>/dev/null || echo 'Not found')" echo "- helm: $(helm version --short 2>/dev/null || echo 'Not found')" echo "- jq: $(jq --version 2>/dev/null || echo 'Not found')" -echo "- Container Runtime: $(command -v podman &>/dev/null && echo "Podman $(podman --version 2>/dev/null)" || command -v docker &>/dev/null && echo "Docker $(docker --version 2>/dev/null)" || echo "Not found")" -echo "- Minikube: $(./bin/minikube version --short 2>/dev/null || echo 'Not found')" echo "" -echo "Setup complete! Host is ready for minikube operations." +echo "Setup complete! Host is ready for operations." diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 0ad5b9690..a229938c4 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -64,10 +64,7 @@ def get_build_arg_names(platform: str) -> Dict[str, str]: # Extract architecture from platform (e.g., "amd64" from "linux/amd64") arch = platform.split("/")[1] - return { - "agent_build_arg": f"mongodb_agent_version_{arch}", - "tools_build_arg": f"mongodb_tools_version_{arch}" - } + return {"agent_build_arg": f"mongodb_agent_version_{arch}", "tools_build_arg": f"mongodb_tools_version_{arch}"} def generate_tools_build_args(platforms: List[str], tools_version: str) -> Dict[str, str]: @@ -346,14 +343,13 @@ def build_init_appdb_image(build_configuration: ImageBuildConfiguration): # Extract tools version and generate platform-specific build args tools_version = extract_tools_version_from_release(release) platform_build_args = generate_tools_build_args( - platforms=build_configuration.platforms, - tools_version=tools_version + platforms=build_configuration.platforms, tools_version=tools_version ) args = { "version": build_configuration.version, "mongodb_tools_url": base_url, # Base URL for platform-specific downloads - **platform_build_args # Add the platform-specific build args + **platform_build_args, # Add the platform-specific build args } build_image( @@ -372,14 +368,13 @@ def build_init_database_image(build_configuration: ImageBuildConfiguration): # Extract tools version and generate platform-specific build args tools_version = extract_tools_version_from_release(release) platform_build_args = generate_tools_build_args( - platforms=build_configuration.platforms, - tools_version=tools_version + platforms=build_configuration.platforms, tools_version=tools_version ) args = { "version": build_configuration.version, "mongodb_tools_url": base_url, # Add the base URL for the Dockerfile - **platform_build_args # Add the platform-specific build args + **platform_build_args, # Add the platform-specific build args } build_image( @@ -520,14 +515,7 @@ def _build_agent( agent_version = agent_tools_version[0] tools_version = agent_tools_version[1] - tasks_queue.put( - executor.submit( - build_agent_pipeline, - build_configuration, - agent_version, - tools_version - ) - ) + tasks_queue.put(executor.submit(build_agent_pipeline, build_configuration, agent_version, tools_version)) def build_agent_pipeline( @@ -543,12 +531,12 @@ def build_agent_pipeline( # Generate platform-specific build arguments using the mapping platform_build_args = 
generate_agent_build_args( - platforms=build_configuration.platforms, - agent_version=agent_version, - tools_version=tools_version + platforms=build_configuration.platforms, agent_version=agent_version, tools_version=tools_version ) - agent_base_url = "https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" + agent_base_url = ( + "https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" + ) tools_base_url = "https://fastdl.mongodb.org/tools/db" args = { @@ -556,7 +544,7 @@ def build_agent_pipeline( "agent_version": agent_version, "mongodb_agent_url": agent_base_url, "mongodb_tools_url": tools_base_url, - **platform_build_args # Add the platform-specific build args + **platform_build_args, # Add the platform-specific build args } build_image( diff --git a/scripts/release/atomic_pipeline_test.py b/scripts/release/atomic_pipeline_test.py index 6f19b9acd..10da8a152 100644 --- a/scripts/release/atomic_pipeline_test.py +++ b/scripts/release/atomic_pipeline_test.py @@ -19,10 +19,7 @@ def load_agent_build_info(): def get_build_arg_names(platform): """Generate build argument names for a platform.""" arch = platform.split("/")[1] - return { - "agent_build_arg": f"mongodb_agent_version_{arch}", - "tools_build_arg": f"mongodb_tools_version_{arch}" - } + return {"agent_build_arg": f"mongodb_agent_version_{arch}", "tools_build_arg": f"mongodb_tools_version_{arch}"} def extract_tools_version_from_release(release): @@ -87,21 +84,21 @@ def _parse_dockerfile_build_args(dockerfile_path): """Parse Dockerfile to extract expected build arguments using proper parsing.""" build_args = set() - with open(dockerfile_path, 'r') as f: + with open(dockerfile_path, "r") as f: lines = f.readlines() for line in lines: line = line.strip() # Skip comments and empty lines - if not line or line.startswith('#'): + if not line or line.startswith("#"): continue # Parse ARG instructions - if line.startswith('ARG '): + if line.startswith("ARG "): arg_part = line[4:].strip() # Remove 'ARG ' # Handle ARG with default values (ARG name=default) - arg_name = arg_part.split('=')[0].strip() + arg_name = arg_part.split("=")[0].strip() build_args.add(arg_name) @@ -127,7 +124,7 @@ def test_generate_agent_build_args_single_platform(self): expected = { "mongodb_agent_version_amd64": "mongodb-mms-automation-agent-108.0.7.8810-1.linux_x86_64.tar.gz", - "mongodb_tools_version_amd64": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz" + "mongodb_tools_version_amd64": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz", } self.assertEqual(result, expected) @@ -148,12 +145,12 @@ def test_generate_agent_build_args_multiple_platforms(self): "mongodb_agent_version_s390x": "mongodb-mms-automation-agent-108.0.7.8810-1.rhel7_s390x.tar.gz", "mongodb_tools_version_s390x": "mongodb-database-tools-rhel9-s390x-100.12.0.tgz", "mongodb_agent_version_ppc64le": "mongodb-mms-automation-agent-108.0.7.8810-1.rhel8_ppc64le.tar.gz", - "mongodb_tools_version_ppc64le": "mongodb-database-tools-rhel9-ppc64le-100.12.0.tgz" + "mongodb_tools_version_ppc64le": "mongodb-database-tools-rhel9-ppc64le-100.12.0.tgz", } self.assertEqual(result, expected) - @patch('builtins.print') + @patch("builtins.print") def test_generate_agent_build_args_unknown_platform(self, mock_print): """Test handling of unknown platforms.""" platforms = ["linux/amd64", "linux/unknown"] @@ -165,7 +162,7 @@ def test_generate_agent_build_args_unknown_platform(self, mock_print): # Should only include known 
platform expected = { "mongodb_agent_version_amd64": "mongodb-mms-automation-agent-108.0.7.8810-1.linux_x86_64.tar.gz", - "mongodb_tools_version_amd64": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz" + "mongodb_tools_version_amd64": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz", } self.assertEqual(result, expected) @@ -186,10 +183,14 @@ def test_build_args_match_dockerfile_requirements(self): # Define the expected build args based on the platforms we support # This is cleaner than parsing the Dockerfile and more explicit about our expectations expected_dockerfile_args = { - "mongodb_agent_version_amd64", "mongodb_agent_version_arm64", - "mongodb_agent_version_s390x", "mongodb_agent_version_ppc64le", - "mongodb_tools_version_amd64", "mongodb_tools_version_arm64", - "mongodb_tools_version_s390x", "mongodb_tools_version_ppc64le" + "mongodb_agent_version_amd64", + "mongodb_agent_version_arm64", + "mongodb_agent_version_s390x", + "mongodb_agent_version_ppc64le", + "mongodb_tools_version_amd64", + "mongodb_tools_version_arm64", + "mongodb_tools_version_s390x", + "mongodb_tools_version_ppc64le", } # Generate build args for all platforms @@ -201,8 +202,11 @@ def test_build_args_match_dockerfile_requirements(self): generated_build_args = set(result.keys()) # Verify that we generate exactly the build args the Dockerfile expects - self.assertEqual(generated_build_args, expected_dockerfile_args, - f"Generated build args {generated_build_args} don't match expected {expected_dockerfile_args}") + self.assertEqual( + generated_build_args, + expected_dockerfile_args, + f"Generated build args {generated_build_args} don't match expected {expected_dockerfile_args}", + ) # Verify the format of generated filenames matches what Dockerfile expects for arg_name, filename in result.items(): @@ -218,21 +222,26 @@ def test_dockerfile_contains_expected_args(self): dockerfile_path = "docker/mongodb-agent/Dockerfile.atomic" # Read the Dockerfile content - with open(dockerfile_path, 'r') as f: + with open(dockerfile_path, "r") as f: dockerfile_content = f.read() # Define the expected build args expected_args = [ - "mongodb_agent_version_amd64", "mongodb_agent_version_arm64", - "mongodb_agent_version_s390x", "mongodb_agent_version_ppc64le", - "mongodb_tools_version_amd64", "mongodb_tools_version_arm64", - "mongodb_tools_version_s390x", "mongodb_tools_version_ppc64le" + "mongodb_agent_version_amd64", + "mongodb_agent_version_arm64", + "mongodb_agent_version_s390x", + "mongodb_agent_version_ppc64le", + "mongodb_tools_version_amd64", + "mongodb_tools_version_arm64", + "mongodb_tools_version_s390x", + "mongodb_tools_version_ppc64le", ] # Verify each expected arg is declared in the Dockerfile for arg_name in expected_args: - self.assertIn(f"ARG {arg_name}", dockerfile_content, - f"Dockerfile should contain 'ARG {arg_name}' declaration") + self.assertIn( + f"ARG {arg_name}", dockerfile_content, f"Dockerfile should contain 'ARG {arg_name}' declaration" + ) def test_generate_tools_build_args(self): """Test generating tools-only build args.""" @@ -243,18 +252,14 @@ def test_generate_tools_build_args(self): expected = { "mongodb_tools_version_amd64": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz", - "mongodb_tools_version_arm64": "mongodb-database-tools-rhel88-aarch64-100.12.0.tgz" + "mongodb_tools_version_arm64": "mongodb-database-tools-rhel88-aarch64-100.12.0.tgz", } self.assertEqual(result, expected) def test_extract_tools_version_from_release(self): """Test extracting tools version from release.json 
structure.""" - release = { - "mongodbToolsBundle": { - "ubi": "mongodb-database-tools-rhel88-x86_64-100.12.2.tgz" - } - } + release = {"mongodbToolsBundle": {"ubi": "mongodb-database-tools-rhel88-x86_64-100.12.2.tgz"}} result = extract_tools_version_from_release(release) self.assertEqual(result, "100.12.2") @@ -268,8 +273,10 @@ def test_tools_build_args_match_init_dockerfiles(self): # Verify all expected tools build args are present (no agent args) expected_tools_args = { - "mongodb_tools_version_amd64", "mongodb_tools_version_arm64", - "mongodb_tools_version_s390x", "mongodb_tools_version_ppc64le" + "mongodb_tools_version_amd64", + "mongodb_tools_version_arm64", + "mongodb_tools_version_s390x", + "mongodb_tools_version_ppc64le", } generated_args = set(result.keys()) @@ -289,7 +296,9 @@ def test_url_construction_correctness(self): result = generate_agent_build_args(platforms, agent_version, tools_version) - agent_base_url = "https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" + agent_base_url = ( + "https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" + ) tools_base_url = "https://fastdl.mongodb.org/tools/db" agent_filename = result["mongodb_agent_version_amd64"] From d36f392e459fdb446dd6b11f9036ad35621b9d89 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Wed, 13 Aug 2025 12:13:11 +0200 Subject: [PATCH 139/164] linter, reordering minikube, fix agents, fix venv, fix python run path --- .evergreen-functions.yml | 4 ++-- .evergreen.yml | 12 +++++++++--- build_info.json | 8 ++------ scripts/dev/recreate_python_venv.sh | 4 ++-- scripts/evergreen/setup_minikube_host.sh | 8 ++++++-- 5 files changed, 21 insertions(+), 15 deletions(-) diff --git a/.evergreen-functions.yml b/.evergreen-functions.yml index df7a1cfe5..57b705ad5 100644 --- a/.evergreen-functions.yml +++ b/.evergreen-functions.yml @@ -352,7 +352,7 @@ functions: shell: bash working_dir: src/github.com/mongodb/mongodb-kubernetes script: | - scripts/evergreen/run_python.sh scripts/evergreen/e2e/setup_cloud_qa.py create + scripts/dev/run_python.sh scripts/evergreen/e2e/setup_cloud_qa.py create # The additional switch is needed, since we now have created the needed OM exports.
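# scripts/dev/run_python.sh, which replaces scripts/evergreen/run_python.sh
# above, is assumed here to be a thin wrapper that activates the project venv
# before delegating, roughly:
#
#   #!/usr/bin/env bash
#   set -Eeou pipefail
#   source scripts/dev/set_env_context.sh
#   source "${PROJECT_DIR}/venv/bin/activate"
#   python "$@"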
- *switch_context @@ -363,7 +363,7 @@ functions: shell: bash working_dir: src/github.com/mongodb/mongodb-kubernetes script: | - scripts/evergreen/run_python.sh scripts/evergreen/e2e/setup_cloud_qa.py delete + scripts/dev/run_python.sh scripts/evergreen/e2e/setup_cloud_qa.py delete dump_diagnostic_information_from_all_namespaces: - command: subprocess.exec diff --git a/.evergreen.yml b/.evergreen.yml index e0f9db349..ed51a28e8 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -435,9 +435,15 @@ tasks: - name: build_test_image_ibm commands: - func: clone - - func: python_venv - - func: setup_aws - - func: download_kube_tools + - command: subprocess.exec + type: setup + params: + env: + SKIP_MINIKUBE_SETUP: "true" + working_dir: src/github.com/mongodb/mongodb-kubernetes + add_to_path: + - ${workdir}/bin + command: scripts/evergreen/setup_minikube_host.sh - func: build_multi_cluster_binary - func: build_test_image_ibm diff --git a/build_info.json b/build_info.json index 1db22b5fc..6c68d7421 100644 --- a/build_info.json +++ b/build_info.json @@ -160,18 +160,14 @@ "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-tests", "platforms": [ "linux/arm64", - "linux/amd64", - "linux/s390x", - "linux/ppc64le" + "linux/amd64" ] }, "staging": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-tests", "platforms": [ "linux/arm64", - "linux/amd64", - "linux/s390x", - "linux/ppc64le" + "linux/amd64" ] } }, diff --git a/scripts/dev/recreate_python_venv.sh b/scripts/dev/recreate_python_venv.sh index 6594b3fb2..5ccff918e 100755 --- a/scripts/dev/recreate_python_venv.sh +++ b/scripts/dev/recreate_python_venv.sh @@ -12,7 +12,7 @@ install_pyenv() { echo "pyenv directory already exists, setting up environment..." 
>&2 export PYENV_ROOT="${HOME}/.pyenv" export PATH="${PYENV_ROOT}/bin:${PATH}" - + # Initialize pyenv in current shell if command -v pyenv &> /dev/null; then eval "$(pyenv init --path)" @@ -24,7 +24,7 @@ install_pyenv() { rm -rf "${HOME}/.pyenv" fi fi - + # Check if pyenv command is available in PATH if command -v pyenv &> /dev/null; then echo "pyenv already available in PATH" >&2 diff --git a/scripts/evergreen/setup_minikube_host.sh b/scripts/evergreen/setup_minikube_host.sh index db889d412..327d9e17a 100755 --- a/scripts/evergreen/setup_minikube_host.sh +++ b/scripts/evergreen/setup_minikube_host.sh @@ -39,6 +39,7 @@ run_setup_step() { } # Setup Python environment (needed for AWS CLI pip installation) +export GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=1 export SKIP_INSTALL_REQUIREMENTS=true run_setup_step "Python Virtual Environment" "scripts/dev/recreate_python_venv.sh" @@ -48,8 +49,11 @@ run_setup_step "kubectl and helm Setup" "scripts/evergreen/setup_kubectl.sh" run_setup_step "jq Setup" "scripts/evergreen/setup_jq.sh" -run_setup_step "Minikube Host Setup with Container Runtime Detection" "scripts/minikube/setup_minikube_host.sh" - +if [[ "${SKIP_MINIKUBE_SETUP:-}" != "true" ]]; then + run_setup_step "Minikube Host Setup with Container Runtime Detection" "scripts/minikube/setup_minikube_host.sh" +else + echo "⏭️ Skipping Minikube setup as SKIP_MINIKUBE_SETUP=true" +fi export CONTAINER_RUNTIME=podman run_setup_step "Container Registry Authentication" "scripts/dev/configure_container_auth.sh" From 1c3a7adfc8ee38c4c1316024f44bb56ae122bc39 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Wed, 13 Aug 2025 12:37:40 +0200 Subject: [PATCH 140/164] remove lucians images --- scripts/dev/contexts/e2e_smoke_arm | 10 ---------- scripts/dev/contexts/e2e_smoke_ibm_power | 9 --------- scripts/dev/contexts/e2e_smoke_ibm_z | 9 --------- 3 files changed, 28 deletions(-) diff --git a/scripts/dev/contexts/e2e_smoke_arm b/scripts/dev/contexts/e2e_smoke_arm index 64568ed5b..6ca95ea73 100644 --- a/scripts/dev/contexts/e2e_smoke_arm +++ b/scripts/dev/contexts/e2e_smoke_arm @@ -18,13 +18,3 @@ export CUSTOM_MDB_VERSION=6.0.5-ent export CUSTOM_MDB_PREV_VERSION=5.0.7-ent export KUBE_ENVIRONMENT_NAME=kind export CLUSTER_TYPE=kind - - -# TODO: change once we have image building -export BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa" -export REGISTRY="${BASE_REPO_URL}" -export INIT_DATABASE_IMAGE_REPOSITORY="${BASE_REPO_URL}/mongodb-kubernetes-init-database" -export OPERATOR_REGISTRY=${BASE_REPO_URL} -export DATABASE_REGISTRY=${BASE_REPO_URL} -export INIT_DATABASE_REGISTRY=${BASE_REPO_URL} - diff --git a/scripts/dev/contexts/e2e_smoke_ibm_power b/scripts/dev/contexts/e2e_smoke_ibm_power index 2b1000cb6..e811437d9 100644 --- a/scripts/dev/contexts/e2e_smoke_ibm_power +++ b/scripts/dev/contexts/e2e_smoke_ibm_power @@ -19,12 +19,3 @@ export CUSTOM_MDB_PREV_VERSION=5.0.7-ent export KUBE_ENVIRONMENT_NAME=minikube export CLUSTER_TYPE=minikube export CONTAINER_RUNTIME=podman - -# TODO: change once we have image building -export BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa" -export REGISTRY="${BASE_REPO_URL}" -export INIT_DATABASE_IMAGE_REPOSITORY="${BASE_REPO_URL}/mongodb-kubernetes-init-database" -export OPERATOR_REGISTRY=${BASE_REPO_URL} -export DATABASE_REGISTRY=${BASE_REPO_URL} -export INIT_DATABASE_REGISTRY=${BASE_REPO_URL} - diff --git a/scripts/dev/contexts/e2e_smoke_ibm_z b/scripts/dev/contexts/e2e_smoke_ibm_z index 2b1000cb6..e811437d9 100644 --- 
a/scripts/dev/contexts/e2e_smoke_ibm_z +++ b/scripts/dev/contexts/e2e_smoke_ibm_z @@ -19,12 +19,3 @@ export CUSTOM_MDB_PREV_VERSION=5.0.7-ent export KUBE_ENVIRONMENT_NAME=minikube export CLUSTER_TYPE=minikube export CONTAINER_RUNTIME=podman - -# TODO: change once we have image building -export BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa" -export REGISTRY="${BASE_REPO_URL}" -export INIT_DATABASE_IMAGE_REPOSITORY="${BASE_REPO_URL}/mongodb-kubernetes-init-database" -export OPERATOR_REGISTRY=${BASE_REPO_URL} -export DATABASE_REGISTRY=${BASE_REPO_URL} -export INIT_DATABASE_REGISTRY=${BASE_REPO_URL} - From d707450cae1a7cbdfc269c7707e52c9bd84d4814 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Wed, 13 Aug 2025 13:56:38 +0200 Subject: [PATCH 141/164] ibm smoke tests only on master merges --- .evergreen.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.evergreen.yml b/.evergreen.yml index 08cf19d77..eac7b16c1 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -1494,7 +1494,7 @@ buildvariants: run_on: - rhel9-power-small - rhel9-power-large - allowed_requesters: [ "patch", "github_tag" ] + allowed_requesters: [ "github_tag" ] depends_on: - name: build_operator_ubi variant: init_test_run @@ -1519,7 +1519,7 @@ buildvariants: run_on: - rhel9-zseries-small - rhel9-zseries-large - allowed_requesters: [ "patch", "github_tag" ] + allowed_requesters: [ "github_tag" ] depends_on: - name: build_operator_ubi variant: init_test_run From d7f05388c79bbda7b5330ae2cc87cafd90b91b38 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Wed, 13 Aug 2025 15:00:25 +0200 Subject: [PATCH 142/164] fix master merge --- scripts/release/atomic_pipeline.py | 24 ------------------------ 1 file changed, 24 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 564c4e709..c86f69fda 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -27,29 +27,6 @@ TRACER = trace.get_tracer("evergreen-agent") -@TRACER.start_as_current_span("build_image") -def build_image( - build_configuration: ImageBuildConfiguration, - build_args: Dict[str, str] = None, - build_path: str = ".", -): - """ - Build an image then (optionally) sign the result. 
- """ - image_name = build_configuration.image_name() - span = trace.get_current_span() - span.set_attribute("mck.image_name", image_name) - - base_registry = build_configuration.base_registry() - build_args = build_args or {} - - if build_args: - span.set_attribute("mck.build_args", str(build_args)) - span.set_attribute("mck.registry", base_registry) - span.set_attribute("mck.platforms", build_configuration.platforms) - - # Build docker registry URI and call build_image - image_full_uri = f"{build_configuration.registry}:{build_configuration.version}" def load_agent_build_info(): """Load agent platform mappings from build_info_agent.json""" with open("build_info_agent.json", "r") as f: @@ -556,7 +533,6 @@ def build_agent_pipeline( } build_image( - dockerfile_path="docker/mongodb-agent/Dockerfile.atomic", build_configuration=build_configuration_copy, build_args=args, ) From 65d62962483777babef1a7e82536fe402d641edc Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Mon, 18 Aug 2025 13:01:37 +0200 Subject: [PATCH 143/164] pipeline --- scripts/release/atomic_pipeline.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 9d1a76200..e0dd5def6 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -8,7 +8,7 @@ from concurrent.futures import ProcessPoolExecutor from copy import copy from queue import Queue -from typing import Dict, Optional, Tuple +from typing import Dict, List, Optional, Tuple import requests from opentelemetry import trace @@ -123,11 +123,9 @@ def generate_agent_build_args(platforms: List[str], agent_version: str, tools_ve mapping = agent_info["platform_mappings"][platform] build_arg_names = get_build_arg_names(platform) - # Generate agent build arg agent_filename = f"{agent_info['base_names']['agent']}-{agent_version}.{mapping['agent_suffix']}" build_args[build_arg_names["agent_build_arg"]] = agent_filename - # Generate tools build arg tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version) tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}" build_args[build_arg_names["tools_build_arg"]] = tools_filename @@ -499,14 +497,6 @@ def queue_exception_handling(tasks_queue): ) -def get_tools_distro(tools_version: str) -> Dict[str, str]: - new_rhel_tool_version = "100.10.0" - default_distro = {"arm": "rhel90-aarch64", "amd": "rhel90-x86_64"} - if Version(tools_version) >= Version(new_rhel_tool_version): - return {"arm": "rhel93-aarch64", "amd": "rhel93-x86_64"} - return default_distro - - def load_release_file() -> Dict: with open("release.json") as release: return json.load(release) From 5eb0310f778c18f8e767cb494857b00cefd0af58 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Mon, 18 Aug 2025 13:29:28 +0200 Subject: [PATCH 144/164] support releasing latest agents --- scripts/release/atomic_pipeline.py | 5 +- .../build/image_build_configuration.py | 1 + scripts/release/detect_ops_manager_changes.py | 67 +++++++++++++++++++ scripts/release/pipeline_main.py | 6 ++ 4 files changed, 78 insertions(+), 1 deletion(-) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index e0dd5def6..c7a774035 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -12,7 +12,6 @@ import requests from opentelemetry import trace -from packaging.version import Version from lib.base_logger import logger from scripts.release.build.image_build_configuration import 
ImageBuildConfiguration @@ -24,6 +23,7 @@ ) from scripts.release.detect_ops_manager_changes import ( detect_ops_manager_changes, + get_currently_used_agents, get_all_agents_for_rebuild, ) @@ -407,6 +407,9 @@ def build_agent(build_configuration: ImageBuildConfiguration): if build_configuration.all_agents: agent_versions_to_build = get_all_agents_for_rebuild() logger.info("building all agents") + elif build_configuration.currently_used_agents: + agent_versions_to_build = get_currently_used_agents() + logger.info("building current used agents") else: agent_versions_to_build = detect_ops_manager_changes() logger.info("building agents for changed OM versions") diff --git a/scripts/release/build/image_build_configuration.py b/scripts/release/build/image_build_configuration.py index 72fe8db8d..6a8c45253 100644 --- a/scripts/release/build/image_build_configuration.py +++ b/scripts/release/build/image_build_configuration.py @@ -18,6 +18,7 @@ class ImageBuildConfiguration: platforms: Optional[List[str]] = None sign: bool = False all_agents: bool = False + currently_used_agents: bool = False def is_release_scenario(self) -> bool: return self.scenario == BuildScenario.RELEASE diff --git a/scripts/release/detect_ops_manager_changes.py b/scripts/release/detect_ops_manager_changes.py index 5276497d8..65fe8fee9 100644 --- a/scripts/release/detect_ops_manager_changes.py +++ b/scripts/release/detect_ops_manager_changes.py @@ -3,8 +3,10 @@ Detects changes to opsManagerMapping in release.json for triggering agent releases. Relies on git origin/master vs local release.json """ +import glob import json import logging +import os import subprocess import sys from typing import Dict, List, Optional, Tuple @@ -132,6 +134,71 @@ def get_all_agents_for_rebuild() -> List[Tuple[str, str]]: return list(set(agents)) +def get_currently_used_agents() -> List[Tuple[str, str]]: + """Returns list of (agent_version, tools_version) tuples for agents currently used in contexts""" + logger.info("Getting currently used agents from contexts") + agents = [] + + try: + release_data = load_current_release_json() + if not release_data: + logger.error("Could not load release.json") + return [] + + ops_manager_mapping = extract_ops_manager_mapping(release_data) + ops_manager_versions = ops_manager_mapping.get("ops_manager", {}) + + # Search all context files + context_pattern = "scripts/dev/contexts/**/*" + context_files = glob.glob(context_pattern, recursive=True) + + for context_file in context_files: + if os.path.isfile(context_file): + try: + with open(context_file, "r") as f: + content = f.read() + + # Extract AGENT_VERSION from the context file + for line in content.split('\n'): + if line.startswith('export AGENT_VERSION='): + agent_version = line.split('=')[1].strip() + tools_version = get_tools_version_for_agent(agent_version) + agents.append((agent_version, tools_version)) + logger.info(f"Found agent {agent_version} in {context_file}") + break + + # Extract CUSTOM_OM_VERSION and map to agent version + for line in content.split('\n'): + if line.startswith('export CUSTOM_OM_VERSION='): + om_version = line.split('=')[1].strip() + if om_version in ops_manager_versions: + agent_tools = ops_manager_versions[om_version] + agent_version = agent_tools.get("agent_version") + tools_version = agent_tools.get("tools_version") + if agent_version and tools_version: + agents.append((agent_version, tools_version)) + logger.info(f"Found OM version {om_version} -> agent {agent_version} in {context_file}") + break + + except Exception as e: + 
logger.debug(f"Error reading context file {context_file}: {e}") + + # Also add the main agentVersion from release.json + main_agent_version = release_data.get("agentVersion") + if main_agent_version: + tools_version = get_tools_version_for_agent(main_agent_version) + agents.append((main_agent_version, tools_version)) + logger.info(f"Found main agent version from release.json: {main_agent_version}") + + unique_agents = list(set(agents)) + logger.info(f"Found {len(unique_agents)} currently used agents") + return unique_agents + + except Exception as e: + logger.error(f"Error getting currently used agents: {e}") + return [] + + def detect_ops_manager_changes() -> List[Tuple[str, str]]: """Returns (has_changes, changed_agents_list)""" logger.info("=== Detecting OM Mapping Changes (Local vs Base) ===") diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index b5fe22d9d..6edc4fac8 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -124,6 +124,7 @@ def image_build_config_from_args(args) -> ImageBuildConfiguration: sign=sign, parallel_factor=args.parallel_factor, all_agents=args.all_agents, + currently_used_agents=args.current_agents, ) @@ -257,6 +258,11 @@ def main(): action="store_true", help="Build all agent images.", ) + parser.add_argument( + "--current-agents", + action="store_true", + help="Build all currently used agent images.", + ) args = parser.parse_args() From d1fd1e19e1e988a3aa487c98028668a0ba76795a Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Mon, 18 Aug 2025 14:30:05 +0200 Subject: [PATCH 145/164] refactor agent support and support releasing latest used agents --- .evergreen.yml | 22 +++ scripts/release/agent/__init__.py | 0 .../{ => agent}/detect_ops_manager_changes.py | 0 scripts/release/agent/validation.py | 91 +++++++++ scripts/release/atomic_pipeline.py | 77 ++++---- scripts/release/atomic_pipeline_test.py | 175 +++--------------- 6 files changed, 179 insertions(+), 186 deletions(-) create mode 100644 scripts/release/agent/__init__.py rename scripts/release/{ => agent}/detect_ops_manager_changes.py (100%) create mode 100644 scripts/release/agent/validation.py diff --git a/.evergreen.yml b/.evergreen.yml index 9b3601990..f77f75605 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -397,6 +397,18 @@ tasks: image_name: agent all_agents: "--all-agents" + - name: release_all_currently_used_agents_on_ecr + # this enables us to run this manually (patch) and release all agent versions to ECR to verify + # Dockerfile, script changes etc. + allowed_requesters: [ "patch" ] + commands: + - func: clone + - func: setup_building_host + - func: pipeline + vars: + image_name: agent + all_agents: "--current-agents" + - name: build_test_image commands: - func: clone @@ -1927,6 +1939,16 @@ buildvariants: tasks: - name: release_all_agents_on_ecr + # Only called manually, It's used for testing the task release_agents_on_ecr in case the release.json + # has not changed, and you still want to push the images to ecr. 
+ - name: manual_ecr_release_agent_currently_used + display_name: manual_ecr_release_agent_currently_used + tags: [ "release_all_agents_manually" ] + run_on: + - ubuntu2204-large + tasks: + - name: release_all_currently_used_agents_on_ecr + # These variants are used to test the code snippets and each one can be used in patches # Prerelease is especially used when the repo is tagged # More details in the TD: https://docs.google.com/document/d/1fuTxfRtP8QPtn7sKYxQM_AGcD6xycTZH8svngGxyKhc/edit?tab=t.0#bookmark=id.e8uva0393mbe diff --git a/scripts/release/agent/__init__.py b/scripts/release/agent/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/scripts/release/detect_ops_manager_changes.py b/scripts/release/agent/detect_ops_manager_changes.py similarity index 100% rename from scripts/release/detect_ops_manager_changes.py rename to scripts/release/agent/detect_ops_manager_changes.py diff --git a/scripts/release/agent/validation.py b/scripts/release/agent/validation.py new file mode 100644 index 000000000..b7e83ca8c --- /dev/null +++ b/scripts/release/agent/validation.py @@ -0,0 +1,91 @@ +import json +from typing import List + +import requests + +from lib.base_logger import logger + + +def load_agent_build_info(): + """Load agent platform mappings from build_info_agent.json""" + with open("build_info_agent.json", "r") as f: + return json.load(f) + + +def validate_agent_version_exists(agent_version: str, platforms: List[str]) -> bool: + """ + Validate that the agent version exists for all specified platforms by checking URLs. + + Args: + agent_version: MongoDB agent version to validate + platforms: List of platforms to check + + Returns: + True if agent version exists for all platforms, False otherwise + """ + agent_info = load_agent_build_info() + agent_base_url = ( + "https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" + ) + + for platform in platforms: + if platform not in agent_info["platform_mappings"]: + logger.warning(f"Platform {platform} not found in agent mappings, skipping validation") + continue + + mapping = agent_info["platform_mappings"][platform] + agent_filename = f"{agent_info['base_names']['agent']}-{agent_version}.{mapping['agent_suffix']}" + agent_url = f"{agent_base_url}/{agent_filename}" + + try: + # Use HEAD request to check if URL exists without downloading the file + response = requests.head(agent_url, timeout=30) + if response.status_code != 200: + logger.warning(f"Agent version {agent_version} not found for platform {platform} at {agent_url} (HTTP {response.status_code})") + return False + logger.debug(f"Agent version {agent_version} validated for platform {platform}") + except requests.RequestException as e: + logger.warning(f"Failed to validate agent version {agent_version} for platform {platform}: {e}") + return False + + logger.info(f"Agent version {agent_version} validated for all platforms: {platforms}") + return True + + +def validate_tools_version_exists(tools_version: str, platforms: List[str]) -> bool: + """ + Validate that the tools version exists for all specified platforms by checking URLs. 
+
+    Args:
+        tools_version: MongoDB tools version to validate
+        platforms: List of platforms to check
+
+    Returns:
+        True if tools version exists for all platforms, False otherwise
+    """
+    agent_info = load_agent_build_info()
+    tools_base_url = "https://fastdl.mongodb.org/tools/db"
+
+    for platform in platforms:
+        if platform not in agent_info["platform_mappings"]:
+            logger.warning(f"Platform {platform} not found in agent mappings, skipping tools validation")
+            continue
+
+        mapping = agent_info["platform_mappings"][platform]
+        tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version)
+        tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}"
+        tools_url = f"{tools_base_url}/{tools_filename}"
+
+        try:
+            # Use HEAD request to check if URL exists without downloading the file
+            response = requests.head(tools_url, timeout=30)
+            if response.status_code != 200:
+                logger.warning(f"Tools version {tools_version} not found for platform {platform} at {tools_url} (HTTP {response.status_code})")
+                return False
+            logger.debug(f"Tools version {tools_version} validated for platform {platform}")
+        except requests.RequestException as e:
+            logger.warning(f"Failed to validate tools version {tools_version} for platform {platform}: {e}")
+            return False
+
+    logger.info(f"Tools version {tools_version} validated for all platforms: {platforms}")
+    return True
diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py
index c7a774035..28c9d218f 100755
--- a/scripts/release/atomic_pipeline.py
+++ b/scripts/release/atomic_pipeline.py
@@ -14,6 +14,7 @@
 from opentelemetry import trace
 
 from lib.base_logger import logger
+from scripts.release.agent.validation import validate_agent_version_exists, validate_tools_version_exists, load_agent_build_info
 from scripts.release.build.image_build_configuration import ImageBuildConfiguration
 from scripts.release.build.image_build_process import execute_docker_build
 from scripts.release.build.image_signing import (
@@ -21,7 +22,7 @@
     sign_image,
     verify_signature,
 )
-from scripts.release.detect_ops_manager_changes import (
+from scripts.release.agent.detect_ops_manager_changes import (
     detect_ops_manager_changes,
     get_currently_used_agents,
     get_all_agents_for_rebuild,
@@ -30,19 +31,10 @@
 TRACER = trace.get_tracer("evergreen-agent")
 
 
-def load_agent_build_info():
-    """Load agent platform mappings from build_info_agent.json"""
-    with open("build_info_agent.json", "r") as f:
-        return json.load(f)
-
-
 def extract_tools_version_from_release(release: Dict) -> str:
     """
     Extract tools version from release.json mongodbToolsBundle.ubi field.
 
-    Args:
-        release: Release dictionary from release.json
-
     Returns:
         Tools version string (e.g., "100.12.2")
     """
@@ -54,22 +46,6 @@
     return tools_version
 
 
-def get_build_arg_names(platform: str) -> Dict[str, str]:
-    """
-    Generate build argument names for a platform.
-
-    Args:
-        platform: Platform string (e.g., "linux/amd64")
-
-    Returns:
-        Dictionary with agent_build_arg and tools_build_arg keys
-    """
-    # Extract architecture from platform (e.g., "amd64" from "linux/amd64")
-    arch = platform.split("/")[1]
-
-    return {"agent_build_arg": f"mongodb_agent_version_{arch}", "tools_build_arg": f"mongodb_tools_version_{arch}"}
-
-
 def generate_tools_build_args(platforms: List[str], tools_version: str) -> Dict[str, str]:
     """
     Generate build arguments for MongoDB tools based on platform mappings.
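For orientation, a minimal standalone sketch of what this mapping produces. The mapping literal below is a hypothetical excerpt in the shape of `build_info_agent.json`, and the expected output filenames match the amd64 fixtures used elsewhere in this series:

```python
# Sketch only: mirrors the platform -> build-arg mapping under the assumed JSON shape.
PLATFORM_MAPPINGS = {
    "linux/amd64": {
        "agent_suffix": "linux_x86_64.tar.gz",
        "tools_suffix": "rhel93-x86_64-{TOOLS_VERSION}.tgz",
    },
}


def sketch_build_args(platforms: list[str], agent_version: str, tools_version: str) -> dict[str, str]:
    build_args = {}
    for platform in platforms:
        arch = platform.split("/")[-1]  # "linux/amd64" -> "amd64"
        mapping = PLATFORM_MAPPINGS[platform]
        build_args[f"mongodb_agent_version_{arch}"] = (
            f"mongodb-mms-automation-agent-{agent_version}.{mapping['agent_suffix']}"
        )
        tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version)
        build_args[f"mongodb_tools_version_{arch}"] = f"mongodb-database-tools-{tools_suffix}"
    return build_args


# sketch_build_args(["linux/amd64"], "108.0.7.8810-1", "100.12.0") ->
#   {"mongodb_agent_version_amd64": "mongodb-mms-automation-agent-108.0.7.8810-1.linux_x86_64.tar.gz",
#    "mongodb_tools_version_amd64": "mongodb-database-tools-rhel93-x86_64-100.12.0.tgz"}
```

These keys line up with the `ARG mongodb_agent_version_<arch>` / `ARG mongodb_tools_version_<arch>` declarations in docker/mongodb-agent/Dockerfile.atomic.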
@@ -90,12 +66,11 @@ def generate_tools_build_args(platforms: List[str], tools_version: str) -> Dict[
             continue
 
         mapping = agent_info["platform_mappings"][platform]
-        build_arg_names = get_build_arg_names(platform)
+        arch = platform.split("/")[-1]
 
-        # Generate tools build arg only
         tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version)
         tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}"
-        build_args[build_arg_names["tools_build_arg"]] = tools_filename
+        build_args[f"mongodb_tools_version_{arch}"] = tools_filename
 
     return build_args
 
@@ -121,14 +96,14 @@ def generate_agent_build_args(platforms: List[str], agent_version: str, tools_ve
             continue
 
         mapping = agent_info["platform_mappings"][platform]
-        build_arg_names = get_build_arg_names(platform)
+        arch = platform.split("/")[-1]
 
         agent_filename = f"{agent_info['base_names']['agent']}-{agent_version}.{mapping['agent_suffix']}"
-        build_args[build_arg_names["agent_build_arg"]] = agent_filename
+        build_args[f"mongodb_agent_version_{arch}"] = agent_filename
 
         tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version)
         tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}"
-        build_args[build_arg_names["tools_build_arg"]] = tools_filename
+        build_args[f"mongodb_tools_version_{arch}"] = tools_filename
 
     return build_args
 
@@ -339,6 +314,12 @@ def build_init_appdb_image(build_configuration: ImageBuildConfiguration):
 
     # Extract tools version and generate platform-specific build args
     tools_version = extract_tools_version_from_release(release)
+
+    # Validate that the tools version exists before attempting to build
+    if not validate_tools_version_exists(tools_version, build_configuration.platforms):
+        logger.warning(f"Skipping build for init-appdb - tools version {tools_version} not found in repository")
+        return
+
     platform_build_args = generate_tools_build_args(
         platforms=build_configuration.platforms, tools_version=tools_version
     )
@@ -359,10 +340,15 @@ def build_init_appdb_image(build_configuration: ImageBuildConfiguration):
 def build_init_database_image(build_configuration: ImageBuildConfiguration):
     release = load_release_file()
     base_url = "https://fastdl.mongodb.org/tools/db"
-    mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"])
 
     # Extract tools version and generate platform-specific build args
     tools_version = extract_tools_version_from_release(release)
+
+    # Validate that the tools version exists before attempting to build
+    if not validate_tools_version_exists(tools_version, build_configuration.platforms):
+        logger.warning(f"Skipping build for init-database - tools version {tools_version} not found in repository")
+        return
+
     platform_build_args = generate_tools_build_args(
         platforms=build_configuration.platforms, tools_version=tools_version
     )
@@ -429,8 +415,26 @@ def build_agent(build_configuration: ImageBuildConfiguration):
     with ProcessPoolExecutor(max_workers=max_workers) as executor:
         logger.info(f"Running with factor of {max_workers}")
         logger.info(f"======= Agent versions to build {agent_versions_to_build} =======")
+
+        successful_builds = []
+        skipped_builds = []
+
         for idx, agent_tools_version in enumerate(agent_versions_to_build):
-            logger.info(f"======= Building Agent {agent_tools_version} ({idx}/{len(agent_versions_to_build)})")
+            agent_version = agent_tools_version[0]
+            tools_version = agent_tools_version[1]
+            logger.info(f"======= Building Agent {agent_tools_version} ({idx + 1}/{len(agent_versions_to_build)})")
+
+            if not
validate_agent_version_exists(agent_version, build_configuration.platforms): + logger.warning(f"Skipping agent version {agent_version} - not found in repository") + skipped_builds.append(agent_tools_version) + continue + + if not validate_tools_version_exists(tools_version, build_configuration.platforms): + logger.warning(f"Skipping agent version {agent_version} - tools version {tools_version} not found in repository") + skipped_builds.append(agent_tools_version) + continue + + successful_builds.append(agent_tools_version) _build_agent( agent_tools_version, build_configuration, @@ -438,6 +442,10 @@ def build_agent(build_configuration: ImageBuildConfiguration): tasks_queue, ) + logger.info(f"Build summary: {len(successful_builds)} successful, {len(skipped_builds)} skipped") + if skipped_builds: + logger.info(f"Skipped versions: {skipped_builds}") + queue_exception_handling(tasks_queue) @@ -464,6 +472,7 @@ def build_agent_pipeline( f"======== Building agent pipeline for version {agent_version}, build configuration version: {build_configuration.version}" ) + # Note: Validation is now done earlier in the build_agent function # Generate platform-specific build arguments using the mapping platform_build_args = generate_agent_build_args( platforms=build_configuration.platforms, agent_version=agent_version, tools_version=tools_version diff --git a/scripts/release/atomic_pipeline_test.py b/scripts/release/atomic_pipeline_test.py index 10da8a152..4e81abac6 100644 --- a/scripts/release/atomic_pipeline_test.py +++ b/scripts/release/atomic_pipeline_test.py @@ -7,104 +7,6 @@ import unittest from unittest.mock import patch -# Local implementations to avoid import issues - - -def load_agent_build_info(): - """Load agent platform mappings from build_info_agent.json""" - with open("build_info_agent.json", "r") as f: - return json.load(f) - - -def get_build_arg_names(platform): - """Generate build argument names for a platform.""" - arch = platform.split("/")[1] - return {"agent_build_arg": f"mongodb_agent_version_{arch}", "tools_build_arg": f"mongodb_tools_version_{arch}"} - - -def extract_tools_version_from_release(release): - """Extract tools version from release.json mongodbToolsBundle.ubi field.""" - tools_bundle = release["mongodbToolsBundle"]["ubi"] - version_part = tools_bundle.split("-")[-1] # Gets "100.12.2.tgz" - tools_version = version_part.replace(".tgz", "") # Gets "100.12.2" - return tools_version - - -def generate_tools_build_args(platforms, tools_version): - """Generate build arguments for MongoDB tools based on platform mappings.""" - agent_info = load_agent_build_info() - build_args = {} - - for platform in platforms: - if platform not in agent_info["platform_mappings"]: - print(f"Platform {platform} not found in agent mappings, skipping") - continue - - mapping = agent_info["platform_mappings"][platform] - build_arg_names = get_build_arg_names(platform) - - # Generate tools build arg only - tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version) - tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}" - build_args[build_arg_names["tools_build_arg"]] = tools_filename - - return build_args - - -def generate_agent_build_args(platforms, agent_version, tools_version): - """ - Generate build arguments for agent image based on platform mappings. 
- This is the actual implementation from atomic_pipeline.py - """ - agent_info = load_agent_build_info() - build_args = {} - - for platform in platforms: - if platform not in agent_info["platform_mappings"]: - # Mock the logger warning for testing - print(f"Platform {platform} not found in agent mappings, skipping") - continue - - mapping = agent_info["platform_mappings"][platform] - build_arg_names = get_build_arg_names(platform) - - # Generate agent build arg - agent_filename = f"{agent_info['base_names']['agent']}-{agent_version}.{mapping['agent_suffix']}" - build_args[build_arg_names["agent_build_arg"]] = agent_filename - - # Generate tools build arg - tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version) - tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}" - build_args[build_arg_names["tools_build_arg"]] = tools_filename - - return build_args - - -def _parse_dockerfile_build_args(dockerfile_path): - """Parse Dockerfile to extract expected build arguments using proper parsing.""" - build_args = set() - - with open(dockerfile_path, "r") as f: - lines = f.readlines() - - for line in lines: - line = line.strip() - # Skip comments and empty lines - if not line or line.startswith("#"): - continue - - # Parse ARG instructions - if line.startswith("ARG "): - arg_part = line[4:].strip() # Remove 'ARG ' - - # Handle ARG with default values (ARG name=default) - arg_name = arg_part.split("=")[0].strip() - - build_args.add(arg_name) - - return build_args - - class TestAgentBuildMapping(unittest.TestCase): """Test cases for agent build mapping functionality.""" @@ -114,60 +16,6 @@ def setUp(self): with open("build_info_agent.json", "r") as f: self.agent_build_info = json.load(f) - def test_generate_agent_build_args_single_platform(self): - """Test generating build args for a single platform.""" - platforms = ["linux/amd64"] - agent_version = "108.0.7.8810-1" - tools_version = "100.12.0" - - result = generate_agent_build_args(platforms, agent_version, tools_version) - - expected = { - "mongodb_agent_version_amd64": "mongodb-mms-automation-agent-108.0.7.8810-1.linux_x86_64.tar.gz", - "mongodb_tools_version_amd64": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz", - } - - self.assertEqual(result, expected) - - def test_generate_agent_build_args_multiple_platforms(self): - """Test generating build args for multiple platforms.""" - platforms = ["linux/amd64", "linux/arm64", "linux/s390x", "linux/ppc64le"] - agent_version = "108.0.7.8810-1" - tools_version = "100.12.0" - - result = generate_agent_build_args(platforms, agent_version, tools_version) - - expected = { - "mongodb_agent_version_amd64": "mongodb-mms-automation-agent-108.0.7.8810-1.linux_x86_64.tar.gz", - "mongodb_tools_version_amd64": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz", - "mongodb_agent_version_arm64": "mongodb-mms-automation-agent-108.0.7.8810-1.amzn2_aarch64.tar.gz", - "mongodb_tools_version_arm64": "mongodb-database-tools-rhel88-aarch64-100.12.0.tgz", - "mongodb_agent_version_s390x": "mongodb-mms-automation-agent-108.0.7.8810-1.rhel7_s390x.tar.gz", - "mongodb_tools_version_s390x": "mongodb-database-tools-rhel9-s390x-100.12.0.tgz", - "mongodb_agent_version_ppc64le": "mongodb-mms-automation-agent-108.0.7.8810-1.rhel8_ppc64le.tar.gz", - "mongodb_tools_version_ppc64le": "mongodb-database-tools-rhel9-ppc64le-100.12.0.tgz", - } - - self.assertEqual(result, expected) - - @patch("builtins.print") - def test_generate_agent_build_args_unknown_platform(self, mock_print): - """Test 
handling of unknown platforms."""
-        platforms = ["linux/amd64", "linux/unknown"]
-        agent_version = "108.0.7.8810-1"
-        tools_version = "100.12.0"
-
-        result = generate_agent_build_args(platforms, agent_version, tools_version)
-
-        # Should only include known platform
-        expected = {
-            "mongodb_agent_version_amd64": "mongodb-mms-automation-agent-108.0.7.8810-1.linux_x86_64.tar.gz",
-            "mongodb_tools_version_amd64": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz",
-        }
-
-        self.assertEqual(result, expected)
-        mock_print.assert_called_once_with("Platform linux/unknown not found in agent mappings, skipping")
-
     def test_generate_agent_build_args_empty_platforms(self):
         """Test generating build args with empty platforms list."""
         platforms = []
@@ -322,6 +170,29 @@ def test_url_construction_correctness(self):
         self.assertFalse(agent_base_url.endswith("/"))
         self.assertFalse(tools_base_url.endswith("/"))
 
+    def test_agent_version_validation(self):
+        """Test that agent version validation works correctly."""
+        from scripts.release.agent.validation import validate_tools_version_exists
+        from scripts.release.agent.validation import validate_agent_version_exists
+
+        platforms = ["linux/amd64"]
+
+        # Test with a known good agent version (this should exist)
+        good_agent_version = "108.0.12.8846-1"
+        self.assertTrue(validate_agent_version_exists(good_agent_version, platforms))
+
+        # Test with a known bad agent version (this should not exist)
+        bad_agent_version = "12.0.33.7866-1"
+        self.assertFalse(validate_agent_version_exists(bad_agent_version, platforms))
+
+        # Test with a known good tools version (this should exist)
+        good_tools_version = "100.12.2"
+        self.assertTrue(validate_tools_version_exists(good_tools_version, platforms))
+
+        # Test with a known bad tools version (this should not exist)
+        bad_tools_version = "999.99.99"
+        self.assertFalse(validate_tools_version_exists(bad_tools_version, platforms))
+
 
 if __name__ == "__main__":
     unittest.main()

From 6091c73fc32eb9812520f14158fd6fc929de7ca6 Mon Sep 17 00:00:00 2001
From: Nam Nguyen
Date: Tue, 19 Aug 2025 15:52:32 +0200
Subject: [PATCH 146/164] CLOUDP-337356 - static support (#333)

# Summary

- I've manually released the agents (only the Cloud Manager one worked) to [ecr](https://spruce.mongodb.com/task/mongodb_kubernetes_manual_ecr_release_agent_currently_used_release_all_currently_used_agents_on_ecr_patch_da6c026f84e33f76e0cd10b4317dc4798572cb99_68a3643d660ad1000740fc4e_25_08_18_17_34_54/logs?execution=0)
- static e2e tests: [link](https://spruce.mongodb.com/version/68a32ff8c96f530007b70dc4/tasks?sorts=STATUS%3AASC%3BBASE_STATUS%3ADESC)

## Proof of Work

## Checklist

- [ ] Have you linked a jira ticket and/or is the ticket in the title?
- [ ] Have you checked whether your jira ticket required DOCSP changes?
- [ ] Have you added changelog file?
- use `skip-changelog` label if not needed - refer to [Changelog files and Release Notes](https://github.com/mongodb/mongodb-kubernetes/blob/master/CONTRIBUTING.md#changelog-files-and-release-notes) section in CONTRIBUTING.md for more details --------- Co-authored-by: mms-build-account Co-authored-by: Evergreen Co-authored-by: Vivek Singh --- .evergreen.yml | 77 +++-- .githooks/pre-commit | 4 +- build_info.json | 56 ++-- build_info_agent.json | 20 +- config/manager/manager.yaml | 4 +- docker/mongodb-agent-non-matrix/README.md | 27 -- docker/mongodb-agent/Dockerfile.atomic | 24 +- docker/mongodb-agent/Dockerfile.old | 2 +- ...readinessprobe.sh => dummy-readinessprobe} | 0 helm_chart/values-openshift.yaml | 2 +- pipeline.py | 2 +- pkg/kubectl-mongodb/common/common_test.go | 8 +- public/mongodb-kubernetes-openshift.yaml | 4 +- release.json | 2 +- scripts/dev/contexts/e2e_smoke_arm | 1 - scripts/dev/contexts/e2e_smoke_ibm_power | 9 +- scripts/dev/contexts/e2e_smoke_ibm_z | 9 +- scripts/dev/contexts/e2e_static_smoke_arm | 20 ++ .../dev/contexts/e2e_static_smoke_ibm_power | 16 ++ scripts/dev/contexts/e2e_static_smoke_ibm_z | 15 + .../dev/contexts/variables/e2e_ibm_smoke_base | 11 + .../agent/detect_ops_manager_changes.py | 209 +++++++++++++- scripts/release/agent/validation.py | 262 +++++++++++++++--- scripts/release/atomic_pipeline.py | 88 ++++-- scripts/release/atomic_pipeline_test.py | 161 +---------- .../tests/test_detect_ops_manager_changes.py | 74 +++-- 26 files changed, 735 insertions(+), 372 deletions(-) delete mode 100644 docker/mongodb-agent-non-matrix/README.md rename docker/mongodb-agent/{dummy-readinessprobe.sh => dummy-readinessprobe} (100%) create mode 100644 scripts/dev/contexts/e2e_static_smoke_arm create mode 100644 scripts/dev/contexts/e2e_static_smoke_ibm_power create mode 100644 scripts/dev/contexts/e2e_static_smoke_ibm_z create mode 100644 scripts/dev/contexts/variables/e2e_ibm_smoke_base diff --git a/.evergreen.yml b/.evergreen.yml index f77f75605..08f5960c0 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -33,8 +33,6 @@ variables: variant: init_test_run - name: build_init_om_images_ubi variant: init_test_run - - name: build_agent_images_ubi - variant: init_test_run - &base_no_om_image_dependency depends_on: @@ -52,8 +50,6 @@ variables: variant: init_test_run - name: build_init_appdb_images_ubi variant: init_test_run - - name: build_agent_images_ubi - variant: init_test_run - &community_dependency depends_on: @@ -67,8 +63,6 @@ variables: variant: init_test_run - name: build_mco_test_image variant: init_test_run - - name: build_agent_images_ubi - variant: init_test_run - &setup_group setup_group_can_fail_task: true @@ -160,8 +154,6 @@ variables: variant: init_test_run - name: build_init_om_images_ubi variant: init_test_run - - name: build_agent_images_ubi - variant: init_test_run - &base_om7_dependency_with_race depends_on: @@ -179,8 +171,6 @@ variables: variant: init_test_run - name: build_init_om_images_ubi variant: init_test_run - - name: build_agent_images_ubi - variant: init_test_run - &base_om8_dependency depends_on: @@ -198,8 +188,6 @@ variables: variant: init_test_run - name: build_init_om_images_ubi variant: init_test_run - - name: build_agent_images_ubi - variant: init_test_run parameters: - key: evergreen_retry @@ -1489,8 +1477,6 @@ buildvariants: variant: init_test_run - name: build_init_om_images_ubi variant: init_test_run - - name: build_agent_images_ubi - variant: init_test_run - name: build_test_image_ibm variant: init_test_run_ibm_power tasks: @@ -1514,8 
+1500,6 @@ buildvariants:
         variant: init_test_run
       - name: build_init_om_images_ubi
         variant: init_test_run
-      - name: build_agent_images_ubi
-        variant: init_test_run
       - name: build_test_image_ibm
         variant: init_test_run_ibm_z
     tasks:
@@ -1531,6 +1515,62 @@ buildvariants:
     tasks:
       - name: e2e_smoke_arm_task_group
 
+  - name: e2e_static_smoke_arm
+    display_name: e2e_static_smoke_arm
+    tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ]
+    run_on:
+      - ubuntu2204-arm64-large
+    allowed_requesters: [ "patch", "github_tag" ]
+    <<: *base_no_om_image_dependency
+    tasks:
+      - name: e2e_smoke_arm_task_group
+
+  - name: e2e_static_smoke_ibm_z
+    display_name: e2e_static_smoke_ibm_z
+    tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ]
+    run_on:
+      - rhel9-zseries-small
+      - rhel9-zseries-large
+    allowed_requesters: [ "patch", "github_tag" ]
+    depends_on:
+      - name: build_operator_ubi
+        variant: init_test_run
+      - name: build_init_database_image_ubi
+        variant: init_test_run
+      - name: build_database_image_ubi
+        variant: init_test_run
+      - name: build_init_appdb_images_ubi
+        variant: init_test_run
+      - name: build_init_om_images_ubi
+        variant: init_test_run
+      - name: build_test_image_ibm
+        variant: init_test_run_ibm_z
+    tasks:
+      - name: e2e_smoke_ibm_task_group
+
+  - name: e2e_static_smoke_ibm_power
+    display_name: e2e_static_smoke_ibm_power
+    tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ]
+    run_on:
+      - rhel9-power-small
+      - rhel9-power-large
+    allowed_requesters: [ "patch", "github_tag" ]
+    depends_on:
+      - name: build_operator_ubi
+        variant: init_test_run
+      - name: build_init_database_image_ubi
+        variant: init_test_run
+      - name: build_database_image_ubi
+        variant: init_test_run
+      - name: build_init_appdb_images_ubi
+        variant: init_test_run
+      - name: build_init_om_images_ubi
+        variant: init_test_run
+      - name: build_test_image_ibm
+        variant: init_test_run_ibm_power
+    tasks:
+      - name: e2e_smoke_ibm_task_group
+
   - name: e2e_static_smoke
     display_name: e2e_static_smoke
     tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ]
@@ -1657,8 +1697,6 @@ buildvariants:
         variant: init_test_run
       - name: prepare_and_upload_openshift_bundles_for_e2e
         variant: init_tests_with_olm
-      - name: build_agent_images_ubi
-        variant: init_test_run
     tasks:
       - name: e2e_kind_olm_group
 
@@ -1682,9 +1720,6 @@ buildvariants:
         variant: init_tests_with_olm
       - name: build_init_database_image_ubi
         variant: init_test_run
-      - name: build_agent_images_ubi
-        variant: init_test_run
-
     tasks:
       - name: e2e_kind_olm_group
 
diff --git a/.githooks/pre-commit b/.githooks/pre-commit
index 4f6af7b50..d68ef1ac7 100755
--- a/.githooks/pre-commit
+++ b/.githooks/pre-commit
@@ -117,9 +117,9 @@ function update_release_json() {
 }
 
 function regenerate_public_rbac_multi_cluster() {
-  if echo "$git_last_changed" | grep -q 'public/tools/multicluster'; then
+  if echo "$git_last_changed" | grep -q -e 'cmd/kubectl-mongodb' -e 'pkg/kubectl-mongodb'; then
     echo 'regenerating multicluster RBAC public example'
-    pushd public/tools/multicluster
+    pushd pkg/kubectl-mongodb/common/
     EXPORT_RBAC_SAMPLES="true" go test ./...
-run TestPrintingOutRolesServiceAccountsAndRoleBindings popd git add public/samples/multi-cluster-cli-gitops diff --git a/build_info.json b/build_info.json index 8a15eb98c..506ca3d15 100644 --- a/build_info.json +++ b/build_info.json @@ -4,8 +4,11 @@ "dockerfile-path": "docker/mongodb-kubernetes-operator/Dockerfile.atomic", "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes", - "platforms": [ - "linux/amd64" + "platforms": [ + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { @@ -50,8 +53,11 @@ "dockerfile-path": "docker/mongodb-kubernetes-init-database/Dockerfile.atomic", "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-database", - "platforms": [ - "linux/amd64" + "platforms": [ + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { @@ -79,8 +85,11 @@ "dockerfile-path": "docker/mongodb-kubernetes-init-appdb/Dockerfile.atomic", "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-appdb", - "platforms": [ - "linux/amd64" + "platforms": [ + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { @@ -108,8 +117,11 @@ "dockerfile-path": "docker/mongodb-kubernetes-init-ops-manager/Dockerfile.atomic", "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-ops-manager", - "platforms": [ - "linux/amd64" + "platforms": [ + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { @@ -137,8 +149,11 @@ "dockerfile-path": "docker/mongodb-kubernetes-database/Dockerfile.atomic", "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-database", - "platforms": [ - "linux/amd64" + "platforms": [ + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { @@ -198,8 +213,11 @@ "dockerfile-path": "docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic", "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-readinessprobe", - "platforms": [ - "linux/amd64" + "platforms": [ + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { @@ -228,8 +246,11 @@ "dockerfile-path": "docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic", "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-operator-version-upgrade-post-start-hook", - "platforms": [ - "linux/amd64" + "platforms": [ + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { @@ -258,8 +279,11 @@ "dockerfile-path": "docker/mongodb-agent/Dockerfile.atomic", "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent-ubi", - "platforms": [ - "linux/amd64" + "platforms": [ + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { diff --git a/build_info_agent.json b/build_info_agent.json index 634a316c6..3a7906fd1 100644 --- a/build_info_agent.json +++ b/build_info_agent.json @@ -1,20 +1,24 @@ { "platform_mappings": { "linux/amd64": { - "agent_suffix": "linux_x86_64.tar.gz", - "tools_suffix": "rhel93-x86_64-{TOOLS_VERSION}.tgz" + "agent_suffixes": ["linux_x86_64.tar.gz"], + "tools_suffix": "rhel93-x86_64-{TOOLS_VERSION}.tgz", + "tools_suffix_old": "rhel90-x86_64-{TOOLS_VERSION}.tgz" }, "linux/arm64": { - "agent_suffix": "amzn2_aarch64.tar.gz", - "tools_suffix": "rhel93-aarch64-{TOOLS_VERSION}.tgz" 
+ "agent_suffixes": ["amzn2_aarch64.tar.gz"], + "tools_suffix": "rhel93-aarch64-{TOOLS_VERSION}.tgz", + "tools_suffix_old": "rhel90-aarch64-{TOOLS_VERSION}.tgz" }, "linux/s390x": { - "agent_suffix": "rhel7_s390x.tar.gz", - "tools_suffix": "rhel9-s390x-{TOOLS_VERSION}.tgz" + "agent_suffixes": ["rhel7_s390x.tar.gz", "rhel8_s390x.tar.gz", "rhel9_s390x.tar.gz"], + "tools_suffix": "rhel9-s390x-{TOOLS_VERSION}.tgz", + "tools_suffix_old": "rhel83-s390x-{TOOLS_VERSION}.tgz" }, "linux/ppc64le": { - "agent_suffix": "rhel8_ppc64le.tar.gz", - "tools_suffix": "rhel9-ppc64le-{TOOLS_VERSION}.tgz" + "agent_suffixes": ["rhel8_ppc64le.tar.gz", "rhel7_ppc64le.tar.gz", "rhel9_ppc64le.tar.gz"], + "tools_suffix": "rhel9-ppc64le-{TOOLS_VERSION}.tgz", + "tools_suffix_old": "rhel81-ppc64le-{TOOLS_VERSION}.tgz" } }, "base_names": { diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 77997de55..c93f3acb5 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -162,8 +162,8 @@ spec: value: "quay.io/mongodb/mongodb-agent-ubi:12.0.34.7888-1" - name: RELATED_IMAGE_AGENT_IMAGE_12_0_35_7911_1 value: "quay.io/mongodb/mongodb-agent-ubi:12.0.35.7911-1" - - name: RELATED_IMAGE_AGENT_IMAGE_13_37_0_9590_1 - value: "quay.io/mongodb/mongodb-agent-ubi:13.37.0.9590-1" + - name: RELATED_IMAGE_AGENT_IMAGE_13_38_0_9654_1 + value: "quay.io/mongodb/mongodb-agent-ubi:13.38.0.9654-1" - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_26 value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:6.0.26" - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_27 diff --git a/docker/mongodb-agent-non-matrix/README.md b/docker/mongodb-agent-non-matrix/README.md deleted file mode 100644 index c50d889c4..000000000 --- a/docker/mongodb-agent-non-matrix/README.md +++ /dev/null @@ -1,27 +0,0 @@ -### Building locally - -For building the MongoDB Agent (non-static) image locally use the example command: - -TODO: What to do with label quay.expires-after=48h? -```bash -AGENT_VERSION="108.0.7.8810-1" -TOOLS_VERSION="100.12.0" -MONGODB_TOOLS_URL="https://downloads.mongodb.org/tools/db" -MONGODB_AGENT_URL="https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" -BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/" - -docker buildx build --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . 
-f docker/mongodb-agent-non-matrix/Dockerfile -t "${BASE_REPO_URL}mongodb-agent:${AGENT_VERSION}" \ - --build-arg version="${AGENT_VERSION}" \ - --build-arg mongodb_tools_url="${MONGODB_TOOLS_URL}" \ - --build-arg mongodb_agent_url="${MONGODB_AGENT_URL}" \ - --build-arg mongodb_agent_version_s390x="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel7_s390x.tar.gz" \ - --build-arg mongodb_agent_version_ppc64le="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel8_ppc64le.tar.gz" \ - --build-arg mongodb_agent_version_amd64="mongodb-mms-automation-agent-${AGENT_VERSION}.linux_x86_64.tar.gz" \ - --build-arg mongodb_agent_version_arm64="mongodb-mms-automation-agent-${AGENT_VERSION}.amzn2_aarch64.tar.gz" \ - --build-arg mongodb_tools_version_arm64="mongodb-database-tools-rhel93-aarch64-${TOOLS_VERSION}.tgz" \ - --build-arg mongodb_tools_version_amd64="mongodb-database-tools-rhel93-x86_64-${TOOLS_VERSION}.tgz" \ - --build-arg mongodb_tools_version_s390x="mongodb-database-tools-rhel9-s390x-${TOOLS_VERSION}.tgz" \ - --build-arg mongodb_tools_version_ppc64le="mongodb-database-tools-rhel9-ppc64le-${TOOLS_VERSION}.tgz" - -docker push "${BASE_REPO_URL}mongodb-agent:${AGENT_VERSION}" -``` diff --git a/docker/mongodb-agent/Dockerfile.atomic b/docker/mongodb-agent/Dockerfile.atomic index 5160285ce..645dec816 100644 --- a/docker/mongodb-agent/Dockerfile.atomic +++ b/docker/mongodb-agent/Dockerfile.atomic @@ -1,4 +1,4 @@ -FROM registry.access.redhat.com/ubi8/ubi-minimal AS tools_downloader +FROM registry.access.redhat.com/ubi9/ubi-minimal:9.0.0 AS tools_downloader ARG TARGETPLATFORM ARG mongodb_tools_url @@ -8,7 +8,7 @@ ARG mongodb_tools_version_amd64 ARG mongodb_tools_version_arm64 RUN microdnf -y update --nodocs \ - && microdnf -y install --nodocs tar gzip curl \ + && microdnf -y install --nodocs tar gzip \ && microdnf clean all RUN case ${TARGETPLATFORM} in \ @@ -23,7 +23,7 @@ RUN case ${TARGETPLATFORM} in \ RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ && rm /tools/mongodb_tools.tgz -FROM registry.access.redhat.com/ubi8/ubi-minimal AS agent_downloader +FROM registry.access.redhat.com/ubi9/ubi-minimal:9.0.0 AS agent_downloader ARG TARGETPLATFORM ARG mongodb_agent_url @@ -33,7 +33,7 @@ ARG mongodb_agent_version_amd64 ARG mongodb_agent_version_arm64 RUN microdnf -y update --nodocs \ - && microdnf -y install --nodocs tar gzip curl \ + && microdnf -y install --nodocs tar gzip \ && microdnf clean all RUN case ${TARGETPLATFORM} in \ @@ -51,7 +51,7 @@ RUN tar xfz /agent/mongodb_agent.tgz \ && rm /agent/mongodb_agent.tgz \ && rm -r mongodb-mms-automation-agent-* -FROM registry.access.redhat.com/ubi9/ubi-minimal +FROM registry.access.redhat.com/ubi9/ubi-minimal:9.0.0 ARG TARGETARCH @@ -64,11 +64,7 @@ COPY --from=agent_downloader "/agent/" /agent/ # Replace libcurl-minimal and curl-minimal with the full versions # https://bugzilla.redhat.com/show_bug.cgi?id=1994521 -RUN microdnf install -y libssh libpsl libbrotli \ - && microdnf download curl libcurl \ - && rpm -Uvh --nodeps --replacefiles "*curl*$( uname -i ).rpm" \ - && microdnf remove -y libcurl-minimal curl-minimal - +RUN microdnf install -y libssh libpsl libbrotli RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper # Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ RUN microdnf install -y --disableplugin=subscription-manager \ @@ -92,10 +88,10 @@ RUN mkdir -p /agent \ # Copy scripts to a safe location that won't be overwritten by volume 
mount COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/LICENSE -COPY ./docker/mongodb-agent/agent-launcher-shim.sh /opt/scripts/agent-launcher-shim.sh -COPY ./docker/mongodb-agent/setup-agent-files.sh /opt/scripts/setup-agent-files.sh -COPY ./docker/mongodb-agent/dummy-probe.sh /opt/scripts/dummy-probe.sh -COPY ./docker/mongodb-agent/dummy-readinessprobe.sh /opt/scripts/dummy-readinessprobe.sh +COPY ./docker/mongodb-agent/agent-launcher-shim.sh /usr/local/bin/agent-launcher-shim.sh +COPY ./docker/mongodb-agent/setup-agent-files.sh /usr/local/bin/setup-agent-files.sh +COPY ./docker/mongodb-agent/dummy-probe.sh /usr/local/bin/dummy-probe.sh +COPY ./docker/mongodb-agent/dummy-readinessprobe /usr/local/bin/dummy-readinessprobe # Set up directories and permissions (agent and tools are already extracted in downloader stages) RUN mkdir -p /var/lib/automation/config \ diff --git a/docker/mongodb-agent/Dockerfile.old b/docker/mongodb-agent/Dockerfile.old index 80d5c8da6..9022dd86f 100644 --- a/docker/mongodb-agent/Dockerfile.old +++ b/docker/mongodb-agent/Dockerfile.old @@ -49,7 +49,7 @@ COPY --from=base /data/LICENSE /licenses/LICENSE COPY --from=base /opt/scripts/agent-launcher-shim.sh /usr/local/bin/agent-launcher-shim.sh COPY --from=base /opt/scripts/setup-agent-files.sh /usr/local/bin/setup-agent-files.sh COPY --from=base /opt/scripts/dummy-probe.sh /usr/local/bin/dummy-probe.sh -COPY --from=base /opt/scripts/dummy-readinessprobe.sh /usr/local/bin/dummy-readinessprobe +COPY --from=base /opt/scripts/dummy-readinessprobe /usr/local/bin/dummy-readinessprobe RUN tar xfz /agent/mongodb-agent.tar.gz \ && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ diff --git a/docker/mongodb-agent/dummy-readinessprobe.sh b/docker/mongodb-agent/dummy-readinessprobe similarity index 100% rename from docker/mongodb-agent/dummy-readinessprobe.sh rename to docker/mongodb-agent/dummy-readinessprobe diff --git a/helm_chart/values-openshift.yaml b/helm_chart/values-openshift.yaml index 3b6bb789d..9e06ff566 100644 --- a/helm_chart/values-openshift.yaml +++ b/helm_chart/values-openshift.yaml @@ -115,7 +115,7 @@ relatedImages: - 108.0.7.8810-1 - 12.0.34.7888-1 - 12.0.35.7911-1 - - 13.37.0.9590-1 + - 13.38.0.9654-1 mongodbLegacyAppDb: - 4.2.11-ent - 4.2.2-ent diff --git a/pipeline.py b/pipeline.py index 2935377f6..5156992b1 100755 --- a/pipeline.py +++ b/pipeline.py @@ -46,12 +46,12 @@ get_supported_version_for_image, ) from scripts.evergreen.release.sbom import generate_sbom, generate_sbom_for_cli +from scripts.release.agent.detect_ops_manager_changes import detect_ops_manager_changes from scripts.release.build.image_signing import ( mongodb_artifactory_login, sign_image, verify_signature, ) -from scripts.release.detect_ops_manager_changes import detect_ops_manager_changes TRACER = trace.get_tracer("evergreen-agent") diff --git a/pkg/kubectl-mongodb/common/common_test.go b/pkg/kubectl-mongodb/common/common_test.go index fd176e220..31932530d 100644 --- a/pkg/kubectl-mongodb/common/common_test.go +++ b/pkg/kubectl-mongodb/common/common_test.go @@ -486,7 +486,7 @@ func TestPrintingOutRolesServiceAccountsAndRoleBindings(t *testing.T) { sb = marshalToYaml(t, sb, "Central Cluster, cluster-scoped resources", "rbac.authorization.k8s.io/v1", "ClusterRoleBinding", crb.Items) sb = marshalToYaml(t, sb, "Central Cluster, cluster-scoped resources", "v1", "ServiceAccount", sa.Items) - err = 
os.WriteFile("../../../../samples/multi-cluster-cli-gitops/resources/rbac/cluster_scoped_central_cluster.yaml", []byte(sb.String()), os.ModePerm) + err = os.WriteFile("../../../public/samples/multi-cluster-cli-gitops/resources/rbac/cluster_scoped_central_cluster.yaml", []byte(sb.String()), os.ModePerm) assert.NoError(t, err) } @@ -505,7 +505,7 @@ func TestPrintingOutRolesServiceAccountsAndRoleBindings(t *testing.T) { sb = marshalToYaml(t, sb, "Member Cluster, cluster-scoped resources", "rbac.authorization.k8s.io/v1", "ClusterRoleBinding", crb.Items) sb = marshalToYaml(t, sb, "Member Cluster, cluster-scoped resources", "v1", "ServiceAccount", sa.Items) - err = os.WriteFile("../../../../samples/multi-cluster-cli-gitops/resources/rbac/cluster_scoped_member_cluster.yaml", []byte(sb.String()), os.ModePerm) + err = os.WriteFile("../../../public/samples/multi-cluster-cli-gitops/resources/rbac/cluster_scoped_member_cluster.yaml", []byte(sb.String()), os.ModePerm) assert.NoError(t, err) } @@ -526,7 +526,7 @@ func TestPrintingOutRolesServiceAccountsAndRoleBindings(t *testing.T) { sb = marshalToYaml(t, sb, "Central Cluster, namespace-scoped resources", "rbac.authorization.k8s.io/v1", "RoleBinding", rb.Items) sb = marshalToYaml(t, sb, "Central Cluster, namespace-scoped resources", "v1", "ServiceAccount", sa.Items) - err = os.WriteFile("../../../../samples/multi-cluster-cli-gitops/resources/rbac/namespace_scoped_central_cluster.yaml", []byte(sb.String()), os.ModePerm) + err = os.WriteFile("../../../public/samples/multi-cluster-cli-gitops/resources/rbac/namespace_scoped_central_cluster.yaml", []byte(sb.String()), os.ModePerm) assert.NoError(t, err) } @@ -547,7 +547,7 @@ func TestPrintingOutRolesServiceAccountsAndRoleBindings(t *testing.T) { sb = marshalToYaml(t, sb, "Member Cluster, namespace-scoped resources", "rbac.authorization.k8s.io/v1", "RoleBinding", rb.Items) sb = marshalToYaml(t, sb, "Member Cluster, namespace-scoped resources", "v1", "ServiceAccount", sa.Items) - err = os.WriteFile("../../../../samples/multi-cluster-cli-gitops/resources/rbac/namespace_scoped_member_cluster.yaml", []byte(sb.String()), os.ModePerm) + err = os.WriteFile("../../../public/samples/multi-cluster-cli-gitops/resources/rbac/namespace_scoped_member_cluster.yaml", []byte(sb.String()), os.ModePerm) assert.NoError(t, err) } } diff --git a/public/mongodb-kubernetes-openshift.yaml b/public/mongodb-kubernetes-openshift.yaml index 1d25900e2..fda4f6729 100644 --- a/public/mongodb-kubernetes-openshift.yaml +++ b/public/mongodb-kubernetes-openshift.yaml @@ -466,8 +466,8 @@ spec: value: "quay.io/mongodb/mongodb-agent-ubi:12.0.34.7888-1" - name: RELATED_IMAGE_AGENT_IMAGE_12_0_35_7911_1 value: "quay.io/mongodb/mongodb-agent-ubi:12.0.35.7911-1" - - name: RELATED_IMAGE_AGENT_IMAGE_13_37_0_9590_1 - value: "quay.io/mongodb/mongodb-agent-ubi:13.37.0.9590-1" + - name: RELATED_IMAGE_AGENT_IMAGE_13_38_0_9654_1 + value: "quay.io/mongodb/mongodb-agent-ubi:13.38.0.9654-1" - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_26 value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:6.0.26" - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_27 diff --git a/release.json b/release.json index 9b308deaa..818737f55 100644 --- a/release.json +++ b/release.json @@ -106,7 +106,7 @@ ], "opsManagerMapping": { "Description": "These are the agents from which we start supporting static containers.", - "cloud_manager": "13.37.0.9590-1", + "cloud_manager": "13.38.0.9654-1", "cloud_manager_tools": "100.12.2", "ops_manager": { "6.0.26": { diff --git 
a/scripts/dev/contexts/e2e_smoke_arm b/scripts/dev/contexts/e2e_smoke_arm index 6ca95ea73..eaa80356d 100644 --- a/scripts/dev/contexts/e2e_smoke_arm +++ b/scripts/dev/contexts/e2e_smoke_arm @@ -15,6 +15,5 @@ CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${scr export CUSTOM_OM_VERSION export CUSTOM_MDB_VERSION=6.0.5-ent -export CUSTOM_MDB_PREV_VERSION=5.0.7-ent export KUBE_ENVIRONMENT_NAME=kind export CLUSTER_TYPE=kind diff --git a/scripts/dev/contexts/e2e_smoke_ibm_power b/scripts/dev/contexts/e2e_smoke_ibm_power index e811437d9..d5d218f38 100644 --- a/scripts/dev/contexts/e2e_smoke_ibm_power +++ b/scripts/dev/contexts/e2e_smoke_ibm_power @@ -6,16 +6,9 @@ script_name=$(readlink -f "${BASH_SOURCE[0]}") script_dir=$(dirname "${script_name}") source "${script_dir}/root-context" - -export ops_manager_version="cloud_qa" +source "${script_dir}/variables/e2e_ibm_smoke_base" # This is required to be able to rebuild the om image and use that image which has been rebuild export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${script_dir}"/../../../.evergreen.yml | awk '{print $3}') export CUSTOM_OM_VERSION - -export CUSTOM_MDB_VERSION=6.0.5-ent -export CUSTOM_MDB_PREV_VERSION=5.0.7-ent -export KUBE_ENVIRONMENT_NAME=minikube -export CLUSTER_TYPE=minikube -export CONTAINER_RUNTIME=podman diff --git a/scripts/dev/contexts/e2e_smoke_ibm_z b/scripts/dev/contexts/e2e_smoke_ibm_z index e811437d9..d5d218f38 100644 --- a/scripts/dev/contexts/e2e_smoke_ibm_z +++ b/scripts/dev/contexts/e2e_smoke_ibm_z @@ -6,16 +6,9 @@ script_name=$(readlink -f "${BASH_SOURCE[0]}") script_dir=$(dirname "${script_name}") source "${script_dir}/root-context" - -export ops_manager_version="cloud_qa" +source "${script_dir}/variables/e2e_ibm_smoke_base" # This is required to be able to rebuild the om image and use that image which has been rebuild export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${script_dir}"/../../../.evergreen.yml | awk '{print $3}') export CUSTOM_OM_VERSION - -export CUSTOM_MDB_VERSION=6.0.5-ent -export CUSTOM_MDB_PREV_VERSION=5.0.7-ent -export KUBE_ENVIRONMENT_NAME=minikube -export CLUSTER_TYPE=minikube -export CONTAINER_RUNTIME=podman diff --git a/scripts/dev/contexts/e2e_static_smoke_arm b/scripts/dev/contexts/e2e_static_smoke_arm new file mode 100644 index 000000000..eda35c993 --- /dev/null +++ b/scripts/dev/contexts/e2e_static_smoke_arm @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +set -Eeou pipefail + +script_name=$(readlink -f "${BASH_SOURCE[0]}") +script_dir=$(dirname "${script_name}") + +source "${script_dir}/root-context" + +export ops_manager_version="cloud_qa" + +# This is required to be able to rebuild the om image and use that image which has been rebuild +export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev +CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${script_dir}"/../../../.evergreen.yml | awk '{print $3}') +export CUSTOM_OM_VERSION + +export CUSTOM_MDB_VERSION=7.0.22 # we only have ibm static images starting with 7.0.22 +export KUBE_ENVIRONMENT_NAME=kind +export CLUSTER_TYPE=kind +export MDB_DEFAULT_ARCHITECTURE=static diff --git a/scripts/dev/contexts/e2e_static_smoke_ibm_power b/scripts/dev/contexts/e2e_static_smoke_ibm_power new file mode 100644 index 000000000..d80fd7625 --- /dev/null +++ 
b/scripts/dev/contexts/e2e_static_smoke_ibm_power @@ -0,0 +1,16 @@ +#!/usr/bin/env bash + +set -Eeou pipefail + +script_name=$(readlink -f "${BASH_SOURCE[0]}") +script_dir=$(dirname "${script_name}") + +source "${script_dir}/root-context" +source "${script_dir}/variables/e2e_ibm_smoke_base" + +# This is required to be able to rebuild the om image and use that image which has been rebuild +export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev +CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${script_dir}"/../../../.evergreen.yml | awk '{print $3}') +export CUSTOM_OM_VERSION +export MDB_DEFAULT_ARCHITECTURE=static + diff --git a/scripts/dev/contexts/e2e_static_smoke_ibm_z b/scripts/dev/contexts/e2e_static_smoke_ibm_z new file mode 100644 index 000000000..9182c48c3 --- /dev/null +++ b/scripts/dev/contexts/e2e_static_smoke_ibm_z @@ -0,0 +1,15 @@ +#!/usr/bin/env bash + +set -Eeou pipefail + +script_name=$(readlink -f "${BASH_SOURCE[0]}") +script_dir=$(dirname "${script_name}") + +source "${script_dir}/root-context" +source "${script_dir}/variables/e2e_ibm_smoke_base" + +# This is required to be able to rebuild the om image and use that image which has been rebuild +export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev +CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${script_dir}"/../../../.evergreen.yml | awk '{print $3}') +export CUSTOM_OM_VERSION +export MDB_DEFAULT_ARCHITECTURE=static diff --git a/scripts/dev/contexts/variables/e2e_ibm_smoke_base b/scripts/dev/contexts/variables/e2e_ibm_smoke_base new file mode 100644 index 000000000..4cd7d27b3 --- /dev/null +++ b/scripts/dev/contexts/variables/e2e_ibm_smoke_base @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -Eeou pipefail + +export ops_manager_version="cloud_qa" + +export CUSTOM_MDB_VERSION=7.0.22 # we only have ibm static images starting with 7.0.22 +export KUBE_ENVIRONMENT_NAME=minikube +export CLUSTER_TYPE=minikube +export CONTAINER_RUNTIME=podman + diff --git a/scripts/release/agent/detect_ops_manager_changes.py b/scripts/release/agent/detect_ops_manager_changes.py index 65fe8fee9..4f3f6f033 100644 --- a/scripts/release/agent/detect_ops_manager_changes.py +++ b/scripts/release/agent/detect_ops_manager_changes.py @@ -60,6 +60,41 @@ def extract_ops_manager_mapping(release_data: Dict) -> Dict: return release_data.get("supportedImages", {}).get("mongodb-agent", {}).get("opsManagerMapping", {}) +def _is_later_agent_version(version1: str, version2: str) -> bool: + """ + Compare two agent versions and return True if version1 is later than version2. 
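+    For example, "108.0.12.8846-1" is later than "13.38.0.9654-1" (108 > 13 on the first numeric component).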
+ Agent versions are in format like "13.37.0.9590-1" or "108.0.12.8846-1" + """ + if not version1 or not version2: + return False + + def split_version(version: str) -> List[int]: + """Split version string into numeric parts, ignoring suffix after '-'""" + parts = [] + version_part = version.split("-")[0] # Remove suffix like "-1" + for part in version_part.split("."): + try: + parts.append(int(part)) + except ValueError: + # If we can't parse a part as int, skip it + continue + return parts + + v1_parts = split_version(version1) + v2_parts = split_version(version2) + + # Compare each part + max_len = max(len(v1_parts), len(v2_parts)) + for i in range(max_len): + v1_part = v1_parts[i] if i < len(v1_parts) else 0 + v2_part = v2_parts[i] if i < len(v2_parts) else 0 + + if v1_part != v2_part: + return v1_part > v2_part + + return False # Versions are equal + + def get_changed_agents(current_mapping: Dict, base_mapping: Dict) -> List[Tuple[str, str]]: """Returns list of (agent_version, tools_version) tuples for added/changed agents""" added_agents = [] @@ -135,7 +170,156 @@ def get_all_agents_for_rebuild() -> List[Tuple[str, str]]: def get_currently_used_agents() -> List[Tuple[str, str]]: - """Returns list of (agent_version, tools_version) tuples for agents currently used in contexts""" + """Returns list of (agent_version, tools_version) tuples for agents currently used in contexts and cloudmanager agent from release.json""" + logger.info("Getting currently used agents from contexts") + agents = [] + + try: + release_data = load_current_release_json() + if not release_data: + logger.error("Could not load release.json") + return [] + + ops_manager_mapping = extract_ops_manager_mapping(release_data) + ops_manager_versions = ops_manager_mapping.get("ops_manager", {}) + + # Search all context files + context_pattern = "scripts/dev/contexts/**/*" + context_files = glob.glob(context_pattern, recursive=True) + + for context_file in context_files: + if os.path.isfile(context_file): + try: + with open(context_file, "r") as f: + content = f.read() + + # Extract AGENT_VERSION from the context file + for line in content.split("\n"): + if line.startswith("export AGENT_VERSION="): + agent_version = line.split("=")[1].strip() + tools_version = get_tools_version_for_agent(agent_version) + agents.append((agent_version, tools_version)) + logger.info(f"Found agent {agent_version} in {context_file}") + break + + # Extract CUSTOM_OM_VERSION and map to agent version + for line in content.split("\n"): + if line.startswith("export CUSTOM_OM_VERSION="): + om_version = line.split("=")[1].strip() + if om_version in ops_manager_versions: + agent_tools = ops_manager_versions[om_version] + agent_version = agent_tools.get("agent_version") + tools_version = agent_tools.get("tools_version") + if agent_version and tools_version: + agents.append((agent_version, tools_version)) + logger.info( + f"Found OM version {om_version} -> agent {agent_version} in {context_file}" + ) + break + + except Exception as e: + logger.debug(f"Error reading context file {context_file}: {e}") + + # Also add the cloudmanager agent from release.json + cloud_manager_agent = ops_manager_mapping.get("cloud_manager") + cloud_manager_tools = ops_manager_mapping.get("cloud_manager_tools") + if cloud_manager_agent and cloud_manager_tools: + agents.append((cloud_manager_agent, cloud_manager_tools)) + logger.info(f"Found cloudmanager agent from release.json: {cloud_manager_agent}") + + # Also add the main agentVersion from release.json + main_agent_version = 
release_data.get("agentVersion") + if main_agent_version: + tools_version = get_tools_version_for_agent(main_agent_version) + agents.append((main_agent_version, tools_version)) + logger.info(f"Found main agent version from release.json: {main_agent_version}") + + unique_agents = list(set(agents)) + logger.info(f"Found {len(unique_agents)} currently used agents") + return unique_agents + + except Exception as e: + logger.error(f"Error getting currently used agents: {e}") + return [] + + +def detect_ops_manager_changes() -> List[Tuple[str, str]]: + """Returns (has_changes, changed_agents_list)""" + logger.info("=== Detecting OM Mapping Changes (Local vs Base) ===") + + current_release = load_current_release_json() + if not current_release: + logger.error("Could not load current local release.json") + return [] + + master_release = load_release_json_from_master() + if not master_release: + logger.warning("Could not load base release.json, assuming changes exist") + return [] + + current_mapping = extract_ops_manager_mapping(current_release) + base_mapping = extract_ops_manager_mapping(master_release) + + if current_mapping != base_mapping: + return get_changed_agents(current_mapping, base_mapping) + else: + return [] + + +def get_tools_version_for_agent(agent_version: str) -> str: + """Get tools version for a given agent version from release.json""" + release_data = load_current_release_json() + if not release_data: + return "100.12.2" # Default fallback + + ops_manager_mapping = extract_ops_manager_mapping(release_data) + ops_manager_versions = ops_manager_mapping.get("ops_manager", {}) + + # Search through all OM versions to find matching agent version + for om_version, agent_tools in ops_manager_versions.items(): + if agent_tools.get("agent_version") == agent_version: + return agent_tools.get("tools_version", "100.12.2") + + # Check cloud_manager tools version as fallback + return ops_manager_mapping.get("cloud_manager_tools", "100.12.2") + + +def get_all_agents_for_rebuild() -> List[Tuple[str, str]]: + """Returns list of (agent_version, tools_version) tuples for all agents in release.json""" + agents = [] + + release_data = load_current_release_json() + if not release_data: + logger.error("Could not load release.json") + return [] + + ops_manager_mapping = extract_ops_manager_mapping(release_data) + + # Get all ops_manager agents + ops_manager_versions = ops_manager_mapping.get("ops_manager", {}) + for om_version, agent_tools in ops_manager_versions.items(): + agent_version = agent_tools.get("agent_version") + tools_version = agent_tools.get("tools_version") + if agent_version and tools_version: + agents.append((agent_version, tools_version)) + + # Get cloud_manager agent + cloud_manager_agent = ops_manager_mapping.get("cloud_manager") + cloud_manager_tools = ops_manager_mapping.get("cloud_manager_tools") + if cloud_manager_agent and cloud_manager_tools: + agents.append((cloud_manager_agent, cloud_manager_tools)) + + # Get the main agent version from release.json root + main_agent_version = release_data.get("agentVersion") + if main_agent_version: + tools_version = get_tools_version_for_agent(main_agent_version) + agents.append((main_agent_version, tools_version)) + + return list(set(agents)) + + +def get_currently_used_agents() -> List[Tuple[str, str]]: + """Returns list of (agent_version, tools_version) tuples for agents currently used in contexts and cloudmanager agent from release.json""" logger.info("Getting currently used agents from contexts") agents = [] @@ -159,30 +343,39 @@ def 
get_currently_used_agents() -> List[Tuple[str, str]]: content = f.read() # Extract AGENT_VERSION from the context file - for line in content.split('\n'): - if line.startswith('export AGENT_VERSION='): - agent_version = line.split('=')[1].strip() + for line in content.split("\n"): + if line.startswith("export AGENT_VERSION="): + agent_version = line.split("=")[1].strip() tools_version = get_tools_version_for_agent(agent_version) agents.append((agent_version, tools_version)) logger.info(f"Found agent {agent_version} in {context_file}") break # Extract CUSTOM_OM_VERSION and map to agent version - for line in content.split('\n'): - if line.startswith('export CUSTOM_OM_VERSION='): - om_version = line.split('=')[1].strip() + for line in content.split("\n"): + if line.startswith("export CUSTOM_OM_VERSION="): + om_version = line.split("=")[1].strip() if om_version in ops_manager_versions: agent_tools = ops_manager_versions[om_version] agent_version = agent_tools.get("agent_version") tools_version = agent_tools.get("tools_version") if agent_version and tools_version: agents.append((agent_version, tools_version)) - logger.info(f"Found OM version {om_version} -> agent {agent_version} in {context_file}") + logger.info( + f"Found OM version {om_version} -> agent {agent_version} in {context_file}" + ) break except Exception as e: logger.debug(f"Error reading context file {context_file}: {e}") + # Also add the cloudmanager agent from release.json + cloud_manager_agent = ops_manager_mapping.get("cloud_manager") + cloud_manager_tools = ops_manager_mapping.get("cloud_manager_tools") + if cloud_manager_agent and cloud_manager_tools: + agents.append((cloud_manager_agent, cloud_manager_tools)) + logger.info(f"Found cloudmanager agent from release.json: {cloud_manager_agent}") + # Also add the main agentVersion from release.json main_agent_version = release_data.get("agentVersion") if main_agent_version: diff --git a/scripts/release/agent/validation.py b/scripts/release/agent/validation.py index b7e83ca8c..792b01f43 100644 --- a/scripts/release/agent/validation.py +++ b/scripts/release/agent/validation.py @@ -1,5 +1,6 @@ import json -from typing import List +import sys +from typing import Callable, List import requests @@ -12,80 +13,263 @@ def load_agent_build_info(): return json.load(f) -def validate_agent_version_exists(agent_version: str, platforms: List[str]) -> bool: +def _validate_version_exists( + version: str, + platforms: List[str], + base_url: str, + filename_builder: Callable[[dict, str, str], str], + version_type: str, +) -> bool: """ - Validate that the agent version exists for all specified platforms by checking URLs. + Generic validation function for checking if a version exists for all specified platforms. 
Args: - agent_version: MongoDB agent version to validate + version: Version to validate platforms: List of platforms to check + base_url: Base URL for downloads + filename_builder: Function that builds filename from (agent_info, version, platform) + version_type: Type of version being validated (for logging) + platform_not_found_action: Action when platform not found ("exit" or "continue") Returns: - True if agent version exists for all platforms, False otherwise + True if version exists for all platforms, False otherwise """ agent_info = load_agent_build_info() - agent_base_url = ( - "https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" - ) for platform in platforms: if platform not in agent_info["platform_mappings"]: - logger.warning(f"Platform {platform} not found in agent mappings, skipping validation") - continue + logger.error(f"Platform {platform} not found in agent mappings, skipping validation") + sys.exit(1) - mapping = agent_info["platform_mappings"][platform] - agent_filename = f"{agent_info['base_names']['agent']}-{agent_version}.{mapping['agent_suffix']}" - agent_url = f"{agent_base_url}/{agent_filename}" + filename = filename_builder(agent_info, version, platform) + url = f"{base_url}/{filename}" try: # Use HEAD request to check if URL exists without downloading the file - response = requests.head(agent_url, timeout=30) + response = requests.head(url, timeout=30) if response.status_code != 200: - logger.warning(f"Agent version {agent_version} not found for platform {platform} at {agent_url} (HTTP {response.status_code})") + logger.warning( + f"{version_type.title()} version {version} not found for platform {platform} at {url} (HTTP {response.status_code})" + ) return False - logger.debug(f"Agent version {agent_version} validated for platform {platform}") + logger.debug(f"{version_type.title()} version {version} validated for platform {platform}") except requests.RequestException as e: - logger.warning(f"Failed to validate agent version {agent_version} for platform {platform}: {e}") + logger.warning(f"Failed to validate {version_type} version {version} for platform {platform}: {e}") return False - logger.info(f"Agent version {agent_version} validated for all platforms: {platforms}") + logger.info(f"{version_type.title()} version {version} validated for all platforms: {platforms}") return True -def validate_tools_version_exists(tools_version: str, platforms: List[str]) -> bool: +def _build_agent_filenames(agent_info: dict, agent_version: str, platform: str) -> List[str]: + """Build all possible agent filenames for a given platform and version.""" + mapping = agent_info["platform_mappings"][platform] + filenames = [] + for agent_suffix in mapping["agent_suffixes"]: + filename = f"{agent_info['base_names']['agent']}-{agent_version}.{agent_suffix}" + filenames.append(filename) + return filenames + + +def _get_available_platforms_with_fallback( + version: str, + platforms: List[str], + base_url: str, + filenames_builder: Callable[[dict, str, str], List[str]], + version_type: str, +) -> List[str]: """ - Validate that the tools version exists for all specified platforms by checking URLs. + Generic function to get the list of platforms where a version is actually available, + trying multiple filename possibilities for each platform. 
Args: - tools_version: MongoDB tools version to validate + version: Version to check platforms: List of platforms to check + base_url: Base URL for downloads + filenames_builder: Function that builds list of filenames from (agent_info, version, platform) + version_type: Type of version being validated (for logging) Returns: - True if tools version exists for all platforms, False otherwise + List of platforms where the version exists """ agent_info = load_agent_build_info() - tools_base_url = "https://fastdl.mongodb.org/tools/db" + available_platforms = [] for platform in platforms: if platform not in agent_info["platform_mappings"]: - logger.warning(f"Platform {platform} not found in agent mappings, skipping tools validation") + logger.warning(f"Platform {platform} not found in agent mappings, skipping") continue - mapping = agent_info["platform_mappings"][platform] - tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version) - tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}" - tools_url = f"{tools_base_url}/{tools_filename}" + filenames = filenames_builder(agent_info, version, platform) + platform_found = False + + for filename in filenames: + url = f"{base_url}/{filename}" + + try: + response = requests.head(url, timeout=30) + if response.status_code == 200: + available_platforms.append(platform) + logger.debug( + f"{version_type.title()} version {version} available for platform {platform} using {filename}" + ) + platform_found = True + break + else: + logger.debug( + f"{version_type.title()} version {version} not found for platform {platform} at {url} (HTTP {response.status_code})" + ) + except requests.RequestException as e: + logger.debug( + f"Failed to validate {version_type} version {version} for platform {platform} at {url}: {e}" + ) + + if not platform_found: + logger.warning( + f"{version_type.title()} version {version} not found for platform {platform} (tried {len(filenames)} possibilities)" + ) + + return available_platforms + + +def get_available_platforms_for_agent(agent_version: str, platforms: List[str]) -> List[str]: + """ + Get the list of platforms where the agent version is actually available. + Tries multiple RHEL versions for each platform to find working binaries. 
+ + Args: + agent_version: MongoDB agent version to check + platforms: List of platforms to check + + Returns: + List of platforms where the agent version exists + """ + agent_base_url = ( + "https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" + ) + + return _get_available_platforms_with_fallback( + version=agent_version, + platforms=platforms, + base_url=agent_base_url, + filenames_builder=_build_agent_filenames, + version_type="agent", + ) + + +def _build_tools_filenames(agent_info: dict, tools_version: str, platform: str) -> List[str]: + """Build all possible tools filenames for a given platform and version.""" + mapping = agent_info["platform_mappings"][platform] + filenames = [] + + # Try the current tools suffix first + tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version) + filenames.append(f"{agent_info['base_names']['tools']}-{tools_suffix}") + + # Try the old tools suffix as fallback + if "tools_suffix_old" in mapping: + tools_suffix_old = mapping["tools_suffix_old"].replace("{TOOLS_VERSION}", tools_version) + filenames.append(f"{agent_info['base_names']['tools']}-{tools_suffix_old}") + + return filenames + + +def get_working_agent_filename(agent_version: str, platform: str) -> str: + """ + Get the actual working agent filename for a specific platform and version. + Tries multiple RHEL versions and returns the first one that works. + + Args: + agent_version: MongoDB agent version to check + platform: Platform to check + + Returns: + The working filename, or the first filename if none work + """ + agent_base_url = ( + "https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" + ) + + agent_info = load_agent_build_info() + + if platform not in agent_info["platform_mappings"]: + logger.warning(f"Platform {platform} not found in agent mappings") + return "" + + filenames = _build_agent_filenames(agent_info, agent_version, platform) + + for filename in filenames: + url = f"{agent_base_url}/{filename}" try: - # Use HEAD request to check if URL exists without downloading the file - response = requests.head(tools_url, timeout=30) - if response.status_code != 200: - logger.warning(f"Tools version {tools_version} not found for platform {platform} at {tools_url} (HTTP {response.status_code})") - return False - logger.debug(f"Tools version {tools_version} validated for platform {platform}") - except requests.RequestException as e: - logger.warning(f"Failed to validate tools version {tools_version} for platform {platform}: {e}") - return False + response = requests.head(url, timeout=30) + if response.status_code == 200: + logger.debug(f"Found working agent filename for {platform}: {filename}") + return filename + except requests.RequestException: + continue - logger.info(f"Tools version {tools_version} validated for all platforms: {platforms}") - return True + # If none work, return empty string to indicate platform should be skipped + logger.warning(f"No working agent filename found for {platform}, platform will be skipped") + return "" + + +def get_working_tools_filename(tools_version: str, platform: str) -> str: + """ + Get the actual working tools filename for a specific platform and version. + Tries multiple RHEL versions and returns the first one that works. 
+ + Args: + tools_version: MongoDB tools version to check + platform: Platform to check + + Returns: + The working filename, or the first filename if none work + """ + tools_base_url = "https://fastdl.mongodb.org/tools/db" + + agent_info = load_agent_build_info() + + if platform not in agent_info["platform_mappings"]: + logger.warning(f"Platform {platform} not found in agent mappings") + return "" + + filenames = _build_tools_filenames(agent_info, tools_version, platform) + + for filename in filenames: + url = f"{tools_base_url}/{filename}" + + try: + response = requests.head(url, timeout=30) + if response.status_code == 200: + logger.debug(f"Found working tools filename for {platform}: {filename}") + return filename + except requests.RequestException: + continue + + # If none work, return empty string to indicate platform should be skipped + logger.warning(f"No working tools filename found for {platform}, platform will be skipped") + return "" + + +def get_available_platforms_for_tools(tools_version: str, platforms: List[str]) -> List[str]: + """ + Get the list of platforms where the tools version is actually available. + Tries multiple RHEL versions for each platform to find working binaries. + + Args: + tools_version: MongoDB tools version to check + platforms: List of platforms to check + + Returns: + List of platforms where the tools version exists + """ + tools_base_url = "https://fastdl.mongodb.org/tools/db" + + return _get_available_platforms_with_fallback( + version=tools_version, + platforms=platforms, + base_url=tools_base_url, + filenames_builder=_build_tools_filenames, + version_type="tools", + ) diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 28c9d218f..2b5b66939 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -5,6 +5,7 @@ import json import os import shutil +import sys from concurrent.futures import ProcessPoolExecutor from copy import copy from queue import Queue @@ -14,7 +15,18 @@ from opentelemetry import trace from lib.base_logger import logger -from scripts.release.agent.validation import validate_agent_version_exists, validate_tools_version_exists,load_agent_build_info +from scripts.release.agent.detect_ops_manager_changes import ( + detect_ops_manager_changes, + get_all_agents_for_rebuild, + get_currently_used_agents, +) +from scripts.release.agent.validation import ( + get_available_platforms_for_agent, + get_available_platforms_for_tools, + get_working_agent_filename, + get_working_tools_filename, + load_agent_build_info, +) from scripts.release.build.image_build_configuration import ImageBuildConfiguration from scripts.release.build.image_build_process import execute_docker_build from scripts.release.build.image_signing import ( @@ -22,11 +34,6 @@ sign_image, verify_signature, ) -from scripts.release.agent.detect_ops_manager_changes import ( - detect_ops_manager_changes, - get_currently_used_agents, - get_all_agents_for_rebuild, -) TRACER = trace.get_tracer("evergreen-agent") @@ -62,15 +69,16 @@ def generate_tools_build_args(platforms: List[str], tools_version: str) -> Dict[ for platform in platforms: if platform not in agent_info["platform_mappings"]: - logger.warning(f"Platform {platform} not found in agent mappings, skipping") - continue + logger.error(f"Platform {platform} not found in agent mappings, skipping") + sys.exit(1) mapping = agent_info["platform_mappings"][platform] + arch = platform.split("/")[-1] tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", 
tools_version) tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}" - build_args[f"mongodb_tool_version_{arch}"] = tools_filename + build_args[f"mongodb_tools_version_{arch}"] = tools_filename return build_args @@ -95,15 +103,20 @@ def generate_agent_build_args(platforms: List[str], agent_version: str, tools_ve logger.warning(f"Platform {platform} not found in agent mappings, skipping") continue - mapping = agent_info["platform_mappings"][platform] arch = platform.split("/")[-1] - agent_filename = f"{agent_info['base_names']['agent']}-{agent_version}.{mapping['agent_suffix']}" - build_args[f"mongodb_agent_version_{arch}"] = agent_filename + agent_filename = get_working_agent_filename(agent_version, platform) + tools_filename = get_working_tools_filename(tools_version, platform) - tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version) - tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}" - build_args[f"mongodb_tool_version_{arch}"] = tools_filename + # Only add build args if we have valid filenames + if agent_filename and tools_filename: + build_args[f"mongodb_agent_version_{arch}"] = agent_filename + build_args[f"mongodb_tools_version_{arch}"] = tools_filename + logger.debug(f"Added build args for {platform}: agent={agent_filename}, tools={tools_filename}") + else: + logger.warning(f"Skipping build args for {platform} - missing agent or tools filename") + logger.debug(f" agent_filename: {agent_filename}") + logger.debug(f" tools_filename: {tools_filename}") return build_args @@ -290,7 +303,6 @@ def build_om_image(build_configuration: ImageBuildConfiguration): if om_version is None: raise ValueError("`om_version` should be defined.") - # Set the version in the build configuration (it is not provided in the build_configuration) build_configuration.version = om_version om_download_url = os.environ.get("om_download_url", "") @@ -312,11 +324,9 @@ def build_init_appdb_image(build_configuration: ImageBuildConfiguration): release = load_release_file() base_url = "https://fastdl.mongodb.org/tools/db" - # Extract tools version and generate platform-specific build args tools_version = extract_tools_version_from_release(release) - # Validate that the tools version exists before attempting to build - if not validate_tools_version_exists(tools_version, build_configuration.platforms): + if not get_available_platforms_for_tools(tools_version, build_configuration.platforms): logger.warning(f"Skipping build for init-appdb - tools version {tools_version} not found in repository") return @@ -336,16 +346,14 @@ def build_init_appdb_image(build_configuration: ImageBuildConfiguration): ) -# TODO: nam static: remove this once static containers becomes the default def build_init_database_image(build_configuration: ImageBuildConfiguration): release = load_release_file() base_url = "https://fastdl.mongodb.org/tools/db" - # Extract tools version and generate platform-specific build args tools_version = extract_tools_version_from_release(release) # Validate that the tools version exists before attempting to build - if not validate_tools_version_exists(tools_version, build_configuration.platforms): + if not get_available_platforms_for_tools(tools_version, build_configuration.platforms): logger.warning(f"Skipping build for init-database - tools version {tools_version} not found in repository") return @@ -424,20 +432,39 @@ def build_agent(build_configuration: ImageBuildConfiguration): tools_version = agent_tools_version[1] logger.info(f"======= Building Agent 
{agent_tools_version} ({idx + 1}/{len(agent_versions_to_build)})") - if not validate_agent_version_exists(agent_version, build_configuration.platforms): - logger.warning(f"Skipping agent version {agent_version} - not found in repository") + available_agent_platforms = get_available_platforms_for_agent(agent_version, build_configuration.platforms) + available_tools_platforms = get_available_platforms_for_tools(tools_version, build_configuration.platforms) + + available_platforms = list(set(available_agent_platforms) & set(available_tools_platforms)) + + # Check if amd64 is available - if not, skip the entire build + if "linux/amd64" not in available_platforms: + logger.warning( + f"Skipping agent version {agent_version} - amd64 platform not available (required platform)" + ) + if available_platforms: + logger.info(f" Other available platforms were: {available_platforms}") skipped_builds.append(agent_tools_version) continue - if not validate_tools_version_exists(tools_version, build_configuration.platforms): - logger.warning(f"Skipping agent version {agent_version} - tools version {tools_version} not found in repository") + if not available_platforms: + logger.warning( + f"Skipping agent version {agent_version} - no platforms available for both agent and tools" + ) skipped_builds.append(agent_tools_version) continue + if available_platforms != build_configuration.platforms: + logger.info( + f"Building agent {agent_version} for available platforms: {available_platforms} " + f"(skipping: {set(build_configuration.platforms) - set(available_platforms)})" + ) + successful_builds.append(agent_tools_version) _build_agent( agent_tools_version, build_configuration, + available_platforms, executor, tasks_queue, ) @@ -452,22 +479,27 @@ def build_agent(build_configuration: ImageBuildConfiguration): def _build_agent( agent_tools_version: Tuple[str, str], build_configuration: ImageBuildConfiguration, + available_platforms: List[str], executor: ProcessPoolExecutor, tasks_queue: Queue, ): agent_version = agent_tools_version[0] tools_version = agent_tools_version[1] - tasks_queue.put(executor.submit(build_agent_pipeline, build_configuration, agent_version, tools_version)) + tasks_queue.put( + executor.submit(build_agent_pipeline, build_configuration, agent_version, tools_version, available_platforms) + ) def build_agent_pipeline( build_configuration: ImageBuildConfiguration, agent_version: str, tools_version: str, + available_platforms: List[str], ): build_configuration_copy = copy(build_configuration) build_configuration_copy.version = agent_version + build_configuration_copy.platforms = available_platforms # Use only available platforms print( f"======== Building agent pipeline for version {agent_version}, build configuration version: {build_configuration.version}" ) @@ -475,7 +507,7 @@ def build_agent_pipeline( # Note: Validation is now done earlier in the build_agent function # Generate platform-specific build arguments using the mapping platform_build_args = generate_agent_build_args( - platforms=build_configuration.platforms, agent_version=agent_version, tools_version=tools_version + platforms=available_platforms, agent_version=agent_version, tools_version=tools_version ) agent_base_url = ( diff --git a/scripts/release/atomic_pipeline_test.py b/scripts/release/atomic_pipeline_test.py index 4e81abac6..4d6c09157 100644 --- a/scripts/release/atomic_pipeline_test.py +++ b/scripts/release/atomic_pipeline_test.py @@ -7,6 +7,7 @@ import unittest from unittest.mock import patch + class 
TestAgentBuildMapping(unittest.TestCase): """Test cases for agent build mapping functionality.""" @@ -16,164 +17,12 @@ def setUp(self): with open("build_info_agent.json", "r") as f: self.agent_build_info = json.load(f) - def test_generate_agent_build_args_empty_platforms(self): - """Test generating build args with empty platforms list.""" - platforms = [] - agent_version = "108.0.7.8810-1" - tools_version = "100.12.0" - - result = generate_agent_build_args(platforms, agent_version, tools_version) - - self.assertEqual(result, {}) - - def test_build_args_match_dockerfile_requirements(self): - """Test that generated build args exactly match what the Dockerfile expects.""" - # Define the expected build args based on the platforms we support - # This is cleaner than parsing the Dockerfile and more explicit about our expectations - expected_dockerfile_args = { - "mongodb_agent_version_amd64", - "mongodb_agent_version_arm64", - "mongodb_agent_version_s390x", - "mongodb_agent_version_ppc64le", - "mongodb_tools_version_amd64", - "mongodb_tools_version_arm64", - "mongodb_tools_version_s390x", - "mongodb_tools_version_ppc64le", - } - - # Generate build args for all platforms - platforms = ["linux/amd64", "linux/arm64", "linux/s390x", "linux/ppc64le"] - agent_version = "108.0.7.8810-1" - tools_version = "100.12.0" - - result = generate_agent_build_args(platforms, agent_version, tools_version) - generated_build_args = set(result.keys()) - - # Verify that we generate exactly the build args the Dockerfile expects - self.assertEqual( - generated_build_args, - expected_dockerfile_args, - f"Generated build args {generated_build_args} don't match expected {expected_dockerfile_args}", - ) - - # Verify the format of generated filenames matches what Dockerfile expects - for arg_name, filename in result.items(): - if "agent" in arg_name: - self.assertTrue(filename.startswith("mongodb-mms-automation-agent-")) - self.assertTrue(filename.endswith(".tar.gz")) - elif "tools" in arg_name: - self.assertTrue(filename.startswith("mongodb-database-tools-")) - self.assertTrue(filename.endswith(".tgz")) - - def test_dockerfile_contains_expected_args(self): - """Test that the Dockerfile actually contains the build args we expect.""" - dockerfile_path = "docker/mongodb-agent/Dockerfile.atomic" - - # Read the Dockerfile content - with open(dockerfile_path, "r") as f: - dockerfile_content = f.read() - - # Define the expected build args - expected_args = [ - "mongodb_agent_version_amd64", - "mongodb_agent_version_arm64", - "mongodb_agent_version_s390x", - "mongodb_agent_version_ppc64le", - "mongodb_tools_version_amd64", - "mongodb_tools_version_arm64", - "mongodb_tools_version_s390x", - "mongodb_tools_version_ppc64le", - ] - - # Verify each expected arg is declared in the Dockerfile - for arg_name in expected_args: - self.assertIn( - f"ARG {arg_name}", dockerfile_content, f"Dockerfile should contain 'ARG {arg_name}' declaration" - ) - - def test_generate_tools_build_args(self): - """Test generating tools-only build args.""" - platforms = ["linux/amd64", "linux/arm64"] - tools_version = "100.12.0" - - result = generate_tools_build_args(platforms, tools_version) - - expected = { - "mongodb_tools_version_amd64": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz", - "mongodb_tools_version_arm64": "mongodb-database-tools-rhel88-aarch64-100.12.0.tgz", - } - - self.assertEqual(result, expected) - - def test_extract_tools_version_from_release(self): - """Test extracting tools version from release.json structure.""" - release = 
{"mongodbToolsBundle": {"ubi": "mongodb-database-tools-rhel88-x86_64-100.12.2.tgz"}} - - result = extract_tools_version_from_release(release) - self.assertEqual(result, "100.12.2") - - def test_tools_build_args_match_init_dockerfiles(self): - """Test that tools build args match what init-database and init-appdb Dockerfiles expect.""" - platforms = ["linux/amd64", "linux/arm64", "linux/s390x", "linux/ppc64le"] - tools_version = "100.12.0" - - result = generate_tools_build_args(platforms, tools_version) - - # Verify all expected tools build args are present (no agent args) - expected_tools_args = { - "mongodb_tools_version_amd64", - "mongodb_tools_version_arm64", - "mongodb_tools_version_s390x", - "mongodb_tools_version_ppc64le", - } - - generated_args = set(result.keys()) - self.assertEqual(generated_args, expected_tools_args) - - # Verify no agent args are included - for arg_name in result.keys(): - self.assertIn("tools", arg_name) - self.assertNotIn("agent", arg_name) - - def test_url_construction_correctness(self): - """Test that URLs are constructed correctly with proper trailing slashes.""" - # Test agent build args URL construction - platforms = ["linux/amd64"] - agent_version = "108.0.12.8846-1" - tools_version = "100.12.2" - - result = generate_agent_build_args(platforms, agent_version, tools_version) - - agent_base_url = ( - "https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" - ) - tools_base_url = "https://fastdl.mongodb.org/tools/db" - - agent_filename = result["mongodb_agent_version_amd64"] - tools_filename = result["mongodb_tools_version_amd64"] - - # Test URL construction (what happens in Dockerfile: ${base_url}/${filename}) - agent_url = f"{agent_base_url}/{agent_filename}" - tools_url = f"{tools_base_url}/{tools_filename}" - - expected_agent_url = "https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-108.0.12.8846-1.linux_x86_64.tar.gz" - expected_tools_url = "https://fastdl.mongodb.org/tools/db/mongodb-database-tools-rhel88-x86_64-100.12.2.tgz" - - self.assertEqual(agent_url, expected_agent_url) - self.assertEqual(tools_url, expected_tools_url) - - # Verify no double slashes (common mistake) - self.assertNotIn("//", agent_url.replace("https://", "")) - self.assertNotIn("//", tools_url.replace("https://", "")) - - # Verify base URLs do NOT end with slash (to avoid double slashes in Dockerfile) - self.assertFalse(agent_base_url.endswith("/")) - self.assertFalse(tools_base_url.endswith("/")) - def test_agent_version_validation(self): """Test that agent version validation works correctly.""" - from scripts.release.agent.validation import validate_tools_version_exists - from scripts.release.agent.validation import validate_agent_version_exists + from scripts.release.agent.validation import ( + validate_agent_version_exists, + validate_tools_version_exists, + ) platforms = ["linux/amd64"] diff --git a/scripts/release/tests/test_detect_ops_manager_changes.py b/scripts/release/tests/test_detect_ops_manager_changes.py index 8606c4424..af8c173a3 100644 --- a/scripts/release/tests/test_detect_ops_manager_changes.py +++ b/scripts/release/tests/test_detect_ops_manager_changes.py @@ -1,15 +1,13 @@ #!/usr/bin/env python3 """ -Tests for scripts.release.detect_ops_manager_changes.py +Tests for scripts.release.agent.detect_ops_manager_changes.py """ import json -import os import subprocess -import sys import unittest from unittest.mock import MagicMock, 
mock_open, patch -from scripts.release.detect_ops_manager_changes import ( +from scripts.release.agent.detect_ops_manager_changes import ( detect_ops_manager_changes, extract_ops_manager_mapping, get_content_from_git, @@ -129,11 +127,11 @@ def test_no_changes_detected(self): """Test when no changes are detected""" with ( patch( - "scripts.release.detect_ops_manager_changes.load_current_release_json", + "scripts.release.agent.detect_ops_manager_changes.load_current_release_json", return_value=self.current_release_data, ), patch( - "scripts.release.detect_ops_manager_changes.load_release_json_from_master", + "scripts.release.agent.detect_ops_manager_changes.load_release_json_from_master", return_value=self.master_release_data, ), ): @@ -151,10 +149,11 @@ def test_new_ops_manager_version_added(self): with ( patch( - "scripts.release.detect_ops_manager_changes.load_current_release_json", return_value=modified_current + "scripts.release.agent.detect_ops_manager_changes.load_current_release_json", + return_value=modified_current, ), patch( - "scripts.release.detect_ops_manager_changes.load_release_json_from_master", + "scripts.release.agent.detect_ops_manager_changes.load_release_json_from_master", return_value=self.master_release_data, ), ): @@ -163,7 +162,7 @@ def test_new_ops_manager_version_added(self): self.assertIn(("108.0.0.8694-1", "100.10.0"), changed_agents) def test_ops_manager_version_modified(self): - """Test detection when OM version is modified""" + """Test that modifying existing OM version is NOT detected (only new versions are detected)""" modified_current = json.loads(json.dumps(self.current_release_data)) modified_current["supportedImages"]["mongodb-agent"]["opsManagerMapping"]["ops_manager"]["6.0.26"][ "agent_version" @@ -171,16 +170,18 @@ def test_ops_manager_version_modified(self): with ( patch( - "scripts.release.detect_ops_manager_changes.load_current_release_json", return_value=modified_current + "scripts.release.agent.detect_ops_manager_changes.load_current_release_json", + return_value=modified_current, ), patch( - "scripts.release.detect_ops_manager_changes.load_release_json_from_master", + "scripts.release.agent.detect_ops_manager_changes.load_release_json_from_master", return_value=self.master_release_data, ), ): changed_agents = detect_ops_manager_changes() - self.assertIn(("12.0.35.7911-1", "100.10.0"), changed_agents) + # Modified existing OM versions should NOT be detected + self.assertEqual(changed_agents, []) def test_cloud_manager_changed(self): """Test detection when cloud_manager is changed""" @@ -189,10 +190,11 @@ def test_cloud_manager_changed(self): with ( patch( - "scripts.release.detect_ops_manager_changes.load_current_release_json", return_value=modified_current + "scripts.release.agent.detect_ops_manager_changes.load_current_release_json", + return_value=modified_current, ), patch( - "scripts.release.detect_ops_manager_changes.load_release_json_from_master", + "scripts.release.agent.detect_ops_manager_changes.load_release_json_from_master", return_value=self.master_release_data, ), ): @@ -207,10 +209,11 @@ def test_cloud_manager_tools_changed(self): with ( patch( - "scripts.release.detect_ops_manager_changes.load_current_release_json", return_value=modified_current + "scripts.release.agent.detect_ops_manager_changes.load_current_release_json", + return_value=modified_current, ), patch( - "scripts.release.detect_ops_manager_changes.load_release_json_from_master", + "scripts.release.agent.detect_ops_manager_changes.load_release_json_from_master", 
return_value=self.master_release_data, ), ): @@ -218,6 +221,27 @@ def test_cloud_manager_tools_changed(self): changed_agents = detect_ops_manager_changes() self.assertIn(("13.37.0.9590-1", "100.13.0"), changed_agents) + def test_cloud_manager_downgrade_not_detected(self): + """Test that cloud manager downgrade is NOT detected""" + modified_current = json.loads(json.dumps(self.current_release_data)) + # Downgrade from 13.37.0.9590-1 to 13.36.0.9500-1 + modified_current["supportedImages"]["mongodb-agent"]["opsManagerMapping"]["cloud_manager"] = "13.36.0.9500-1" + + with ( + patch( + "scripts.release.agent.detect_ops_manager_changes.load_current_release_json", + return_value=modified_current, + ), + patch( + "scripts.release.agent.detect_ops_manager_changes.load_release_json_from_master", + return_value=self.master_release_data, + ), + ): + + changed_agents = detect_ops_manager_changes() + # Downgrade should NOT be detected + self.assertEqual(changed_agents, []) + def test_ops_manager_version_removed(self): """Test detection when OM version is removed""" modified_current = json.loads(json.dumps(self.current_release_data)) @@ -225,10 +249,11 @@ def test_ops_manager_version_removed(self): with ( patch( - "scripts.release.detect_ops_manager_changes.load_current_release_json", return_value=modified_current + "scripts.release.agent.detect_ops_manager_changes.load_current_release_json", + return_value=modified_current, ), patch( - "scripts.release.detect_ops_manager_changes.load_release_json_from_master", + "scripts.release.agent.detect_ops_manager_changes.load_release_json_from_master", return_value=self.master_release_data, ), ): @@ -247,10 +272,11 @@ def test_both_om_and_cm_changed(self): with ( patch( - "scripts.release.detect_ops_manager_changes.load_current_release_json", return_value=modified_current + "scripts.release.agent.detect_ops_manager_changes.load_current_release_json", + return_value=modified_current, ), patch( - "scripts.release.detect_ops_manager_changes.load_release_json_from_master", + "scripts.release.agent.detect_ops_manager_changes.load_release_json_from_master", return_value=self.master_release_data, ), ): @@ -263,9 +289,9 @@ def test_both_om_and_cm_changed(self): def test_current_release_load_failure(self): """Test handling when current release.json cannot be loaded""" with ( - patch("scripts.release.detect_ops_manager_changes.load_current_release_json", return_value=None), + patch("scripts.release.agent.detect_ops_manager_changes.load_current_release_json", return_value=None), patch( - "scripts.release.detect_ops_manager_changes.load_release_json_from_master", + "scripts.release.agent.detect_ops_manager_changes.load_release_json_from_master", return_value=self.master_release_data, ), ): @@ -277,10 +303,10 @@ def test_base_release_load_failure_fail_safe(self): """Test fail-safe behavior when base release.json cannot be loaded""" with ( patch( - "scripts.release.detect_ops_manager_changes.load_current_release_json", + "scripts.release.agent.detect_ops_manager_changes.load_current_release_json", return_value=self.current_release_data, ), - patch("scripts.release.detect_ops_manager_changes.load_release_json_from_master", return_value=None), + patch("scripts.release.agent.detect_ops_manager_changes.load_release_json_from_master", return_value=None), ): changed_agents = detect_ops_manager_changes() From 9368a9382652767147b0a7eaf950ccf973741354 Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Tue, 19 Aug 2025 17:34:09 +0200 Subject: [PATCH 147/164] Remove multi-arch from init-om 
image --- build_info.json | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/build_info.json b/build_info.json index 506ca3d15..4b08a2835 100644 --- a/build_info.json +++ b/build_info.json @@ -118,30 +118,21 @@ "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-ops-manager", "platforms": [ - "linux/arm64", - "linux/amd64", - "linux/s390x", - "linux/ppc64le" + "linux/amd64" ] }, "staging": { "sign": true, "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-ops-manager", "platforms": [ - "linux/arm64", - "linux/amd64", - "linux/s390x", - "linux/ppc64le" + "linux/amd64" ] }, "release": { "sign": true, "repository": "quay.io/mongodb/mongodb-kubernetes-init-ops-manager", "platforms": [ - "linux/arm64", - "linux/amd64", - "linux/s390x", - "linux/ppc64le" + "linux/amd64" ] } }, From af2eee30e57a8c234c87ff170d9faa2a3505e34d Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Tue, 19 Aug 2025 20:08:20 +0200 Subject: [PATCH 148/164] ibm power only support 8.x.x and reset agent pins --- docker/mongodb-agent/Dockerfile.atomic | 6 +++--- scripts/dev/contexts/e2e_static_smoke_ibm_power | 3 ++- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/docker/mongodb-agent/Dockerfile.atomic b/docker/mongodb-agent/Dockerfile.atomic index 645dec816..7dc742342 100644 --- a/docker/mongodb-agent/Dockerfile.atomic +++ b/docker/mongodb-agent/Dockerfile.atomic @@ -1,4 +1,4 @@ -FROM registry.access.redhat.com/ubi9/ubi-minimal:9.0.0 AS tools_downloader +FROM registry.access.redhat.com/ubi9/ubi-minimal AS tools_downloader ARG TARGETPLATFORM ARG mongodb_tools_url @@ -23,7 +23,7 @@ RUN case ${TARGETPLATFORM} in \ RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ && rm /tools/mongodb_tools.tgz -FROM registry.access.redhat.com/ubi9/ubi-minimal:9.0.0 AS agent_downloader +FROM registry.access.redhat.com/ubi9/ubi-minimal AS agent_downloader ARG TARGETPLATFORM ARG mongodb_agent_url @@ -51,7 +51,7 @@ RUN tar xfz /agent/mongodb_agent.tgz \ && rm /agent/mongodb_agent.tgz \ && rm -r mongodb-mms-automation-agent-* -FROM registry.access.redhat.com/ubi9/ubi-minimal:9.0.0 +FROM registry.access.redhat.com/ubi9/ubi-minimal ARG TARGETARCH diff --git a/scripts/dev/contexts/e2e_static_smoke_ibm_power b/scripts/dev/contexts/e2e_static_smoke_ibm_power index d80fd7625..01656be27 100644 --- a/scripts/dev/contexts/e2e_static_smoke_ibm_power +++ b/scripts/dev/contexts/e2e_static_smoke_ibm_power @@ -13,4 +13,5 @@ export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${script_dir}"/../../../.evergreen.yml | awk '{print $3}') export CUSTOM_OM_VERSION export MDB_DEFAULT_ARCHITECTURE=static - +# MDB doesn't support power for 7.x.x +export CUSTOM_MDB_VERSION=8.0.12 From 489c79186d6858a27590e1e3c7cf57bebae775dd Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Wed, 20 Aug 2025 16:06:38 +0200 Subject: [PATCH 149/164] Multi refactoring arch (#354) # Summary This pull request refactors the agent build and validation logic to improve multi-architecture support and maintainability. It centralizes platform mapping logic, simplifies platform-specific filename resolution, and updates configuration files to reflect platform support changes. Additionally, it documents the new multi-architecture capabilities in the changelog. 
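At its core, the refactor funnels every download lookup through a single probe loop: `build_info_agent.json` lists the candidate archive suffixes per platform, and the first URL that answers an HTTP `HEAD` request with 200 wins. A minimal, self-contained sketch of that idea (the `first_available` helper and its call site are illustrative only; the actual logic lives in `_find_working_filename` and `_validate_url_exists` in `scripts/release/agent/validation.py`):

```python
from typing import List, Optional

import requests


def first_available(base_url: str, candidates: List[str], timeout: int = 30) -> Optional[str]:
    """Return the first candidate filename that exists under base_url, else None."""
    for filename in candidates:
        try:
            # HEAD checks existence without downloading the archive.
            if requests.head(f"{base_url}/{filename}", timeout=timeout).status_code == 200:
                return filename
        except requests.RequestException:
            continue  # network error for this candidate; try the next one
    return None


# Example: tools candidates for linux/s390x, newest RHEL build first
# (suffixes taken from build_info_agent.json).
tools_version = "100.12.0"
candidates = [
    f"mongodb-database-tools-rhel9-s390x-{tools_version}.tgz",
    f"mongodb-database-tools-rhel83-s390x-{tools_version}.tgz",
]
print(first_available("https://fastdl.mongodb.org/tools/db", candidates))
```

A platform is only kept when both an agent and a tools archive resolve this way, which is what lets the agent build shrink its platform list for older versions instead of failing the whole build.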
**Multi-architecture support and configuration updates:** * Updated `build_info.json` to restrict supported platforms for all images to only `linux/amd64`, removing `arm64`, `s390x`, and `ppc64le` from all relevant entries. [[1]](diffhunk://#diff-ac3b10589f332066db566cb08c01afea746d5f16e704beef946107a0c5de5225L8-R8) [[2]](diffhunk://#diff-ac3b10589f332066db566cb08c01afea746d5f16e704beef946107a0c5de5225L28-R25) [[3]](diffhunk://#diff-ac3b10589f332066db566cb08c01afea746d5f16e704beef946107a0c5de5225L57-R51) [[4]](diffhunk://#diff-ac3b10589f332066db566cb08c01afea746d5f16e704beef946107a0c5de5225L89-R80) [[5]](diffhunk://#diff-ac3b10589f332066db566cb08c01afea746d5f16e704beef946107a0c5de5225L144-R132) [[6]](diffhunk://#diff-ac3b10589f332066db566cb08c01afea746d5f16e704beef946107a0c5de5225L208-R193) [[7]](diffhunk://#diff-ac3b10589f332066db566cb08c01afea746d5f16e704beef946107a0c5de5225L241-R223) [[8]](diffhunk://#diff-ac3b10589f332066db566cb08c01afea746d5f16e704beef946107a0c5de5225L274-R253) * Changed the key in `build_info_agent.json` from `platform_mappings` to `platforms` for consistency and clarity. **Agent build and validation code refactoring:** * Introduced a `PlatformConfiguration` class in `validation.py` to encapsulate platform mapping logic, lazy-load build info, and provide unified methods for generating agent and tools build arguments. * Refactored functions to use the new `platforms` key and centralized filename resolution logic, including helper methods for finding valid filenames and checking URL existence. [[1]](diffhunk://#diff-f3a4c7975fde6557a1d05e5170380f65cfef432faf4e5441a1538d003527e449L3-R181) [[2]](diffhunk://#diff-f3a4c7975fde6557a1d05e5170380f65cfef432faf4e5441a1538d003527e449L95-R210) [[3]](diffhunk://#diff-f3a4c7975fde6557a1d05e5170380f65cfef432faf4e5441a1538d003527e449L162-R243) [[4]](diffhunk://#diff-f3a4c7975fde6557a1d05e5170380f65cfef432faf4e5441a1538d003527e449L193-R274) [[5]](diffhunk://#diff-f3a4c7975fde6557a1d05e5170380f65cfef432faf4e5441a1538d003527e449L231-R291) * Updated import statements and removed now-redundant code in `atomic_pipeline.py` to use the new build argument generation functions from the refactored validation logic. [[1]](diffhunk://#diff-f8ee125fd34cdec2e03d7e7282177410cfd21756f4ba3a1ce4f8f2062bfee792R23-L28) [[2]](diffhunk://#diff-f8ee125fd34cdec2e03d7e7282177410cfd21756f4ba3a1ce4f8f2062bfee792L56-L122) * Removed unused imports and cleaned up code in `atomic_pipeline.py`. **Documentation:** * Added a changelog entry describing the addition of comprehensive multi-architecture support for the Kubernetes operator and its images. ## Proof of Work - green ci ## Checklist - [ ] Have you linked a jira ticket and/or is the ticket in the title? - [ ] Have you checked whether your jira ticket required DOCSP changes? - [ ] Have you added changelog file? 
- use `skip-changelog` label if not needed - refer to [Changelog files and Release Notes](https://github.com/mongodb/mongodb-kubernetes/blob/master/CONTRIBUTING.md#changelog-files-and-release-notes) section in CONTRIBUTING.md for more details --- build_info.json | 38 +-- build_info_agent.json | 14 +- .../20250820_feature_multi_arch_support.md | 9 + .../agent/detect_ops_manager_changes.py | 149 --------- scripts/release/agent/validation.py | 294 ++++++++++-------- scripts/release/atomic_pipeline.py | 80 +---- scripts/release/atomic_pipeline_test.py | 192 ++++++++++-- scripts/release/tests/build_info_test.py | 34 +- scripts/release/tests/release_info_test.py | 14 +- .../tests/test_detect_ops_manager_changes.py | 40 --- 10 files changed, 380 insertions(+), 484 deletions(-) create mode 100644 changelog/20250820_feature_multi_arch_support.md diff --git a/build_info.json b/build_info.json index 4b08a2835..bead3e240 100644 --- a/build_info.json +++ b/build_info.json @@ -5,10 +5,7 @@ "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes", "platforms": [ - "linux/arm64", - "linux/amd64", - "linux/s390x", - "linux/ppc64le" + "linux/amd64" ] }, "staging": { @@ -54,11 +51,8 @@ "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-database", "platforms": [ - "linux/arm64", - "linux/amd64", - "linux/s390x", - "linux/ppc64le" - ] + "linux/amd64" + ] }, "staging": { "sign": true, @@ -86,10 +80,7 @@ "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-appdb", "platforms": [ - "linux/arm64", - "linux/amd64", - "linux/s390x", - "linux/ppc64le" + "linux/amd64" ] }, "staging": { @@ -141,10 +132,7 @@ "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-database", "platforms": [ - "linux/arm64", - "linux/amd64", - "linux/s390x", - "linux/ppc64le" + "linux/amd64" ] }, "staging": { @@ -173,7 +161,6 @@ "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-tests", "platforms": [ - "linux/arm64", "linux/amd64" ] }, @@ -205,10 +192,7 @@ "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-readinessprobe", "platforms": [ - "linux/arm64", - "linux/amd64", - "linux/s390x", - "linux/ppc64le" + "linux/amd64" ] }, "staging": { @@ -238,10 +222,7 @@ "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-operator-version-upgrade-post-start-hook", "platforms": [ - "linux/arm64", - "linux/amd64", - "linux/s390x", - "linux/ppc64le" + "linux/amd64" ] }, "staging": { @@ -271,10 +252,7 @@ "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent-ubi", "platforms": [ - "linux/arm64", - "linux/amd64", - "linux/s390x", - "linux/ppc64le" + "linux/amd64" ] }, "staging": { diff --git a/build_info_agent.json b/build_info_agent.json index 3a7906fd1..3070937ae 100644 --- a/build_info_agent.json +++ b/build_info_agent.json @@ -1,24 +1,20 @@ { - "platform_mappings": { + "platforms": { "linux/amd64": { "agent_suffixes": ["linux_x86_64.tar.gz"], - "tools_suffix": "rhel93-x86_64-{TOOLS_VERSION}.tgz", - "tools_suffix_old": "rhel90-x86_64-{TOOLS_VERSION}.tgz" + "tools_suffixes": ["rhel93-x86_64-{TOOLS_VERSION}.tgz", "rhel90-x86_64-{TOOLS_VERSION}.tgz"] }, "linux/arm64": { "agent_suffixes": ["amzn2_aarch64.tar.gz"], - "tools_suffix": "rhel93-aarch64-{TOOLS_VERSION}.tgz", - "tools_suffix_old": "rhel90-aarch64-{TOOLS_VERSION}.tgz" + 
"tools_suffixes": ["rhel93-aarch64-{TOOLS_VERSION}.tgz", "rhel90-aarch64-{TOOLS_VERSION}.tgz"] }, "linux/s390x": { "agent_suffixes": ["rhel7_s390x.tar.gz", "rhel8_s390x.tar.gz", "rhel9_s390x.tar.gz"], - "tools_suffix": "rhel9-s390x-{TOOLS_VERSION}.tgz", - "tools_suffix_old": "rhel83-s390x-{TOOLS_VERSION}.tgz" + "tools_suffixes": ["rhel9-s390x-{TOOLS_VERSION}.tgz", "rhel83-s390x-{TOOLS_VERSION}.tgz"] }, "linux/ppc64le": { "agent_suffixes": ["rhel8_ppc64le.tar.gz", "rhel7_ppc64le.tar.gz", "rhel9_ppc64le.tar.gz"], - "tools_suffix": "rhel9-ppc64le-{TOOLS_VERSION}.tgz", - "tools_suffix_old": "rhel81-ppc64le-{TOOLS_VERSION}.tgz" + "tools_suffixes": ["rhel9-ppc64le-{TOOLS_VERSION}.tgz", "rhel81-ppc64le-{TOOLS_VERSION}.tgz"] } }, "base_names": { diff --git a/changelog/20250820_feature_multi_arch_support.md b/changelog/20250820_feature_multi_arch_support.md new file mode 100644 index 000000000..9f48dbe10 --- /dev/null +++ b/changelog/20250820_feature_multi_arch_support.md @@ -0,0 +1,9 @@ +--- +title: multi-arch support +kind: feature +date: 2025-08-20 +--- + +# Multi-Architecture Support +We've added comprehensive multi-architecture support for the kubernetes operator. This enhancement enables deployment on IBM Power (ppc64le) and IBM Z (s390x) architectures alongside +existing x86_64 support. All core images (operator, agent, init containers, database, readiness probe) now support multiple architectures diff --git a/scripts/release/agent/detect_ops_manager_changes.py b/scripts/release/agent/detect_ops_manager_changes.py index 4f3f6f033..b5787c8e3 100644 --- a/scripts/release/agent/detect_ops_manager_changes.py +++ b/scripts/release/agent/detect_ops_manager_changes.py @@ -264,152 +264,3 @@ def detect_ops_manager_changes() -> List[Tuple[str, str]]: return get_changed_agents(current_mapping, base_mapping) else: return [] - - -def get_tools_version_for_agent(agent_version: str) -> str: - """Get tools version for a given agent version from release.json""" - release_data = load_current_release_json() - if not release_data: - return "100.12.2" # Default fallback - - ops_manager_mapping = extract_ops_manager_mapping(release_data) - ops_manager_versions = ops_manager_mapping.get("ops_manager", {}) - - # Search through all OM versions to find matching agent version - for om_version, agent_tools in ops_manager_versions.items(): - if agent_tools.get("agent_version") == agent_version: - return agent_tools.get("tools_version", "100.12.2") - - # Check cloud_manager tools version as fallback - return ops_manager_mapping.get("cloud_manager_tools", "100.12.2") - - -def get_all_agents_for_rebuild() -> List[Tuple[str, str]]: - """Returns list of (agent_version, tools_version) tuples for all agents in release.json""" - agents = [] - - release_data = load_current_release_json() - if not release_data: - logger.error("Could not load release.json") - return [] - - ops_manager_mapping = extract_ops_manager_mapping(release_data) - - # Get all ops_manager agents - ops_manager_versions = ops_manager_mapping.get("ops_manager", {}) - for om_version, agent_tools in ops_manager_versions.items(): - agent_version = agent_tools.get("agent_version") - tools_version = agent_tools.get("tools_version") - if agent_version and tools_version: - agents.append((agent_version, tools_version)) - - # Get cloud_manager agent - cloud_manager_agent = ops_manager_mapping.get("cloud_manager") - cloud_manager_tools = ops_manager_mapping.get("cloud_manager_tools") - if cloud_manager_agent and cloud_manager_tools: - agents.append((cloud_manager_agent, 
cloud_manager_tools)) - - # Get the main agent version from release.json root - main_agent_version = release_data.get("agentVersion") - if main_agent_version: - tools_version = get_tools_version_for_agent(main_agent_version) - agents.append((main_agent_version, tools_version)) - - return list(set(agents)) - - -def get_currently_used_agents() -> List[Tuple[str, str]]: - """Returns list of (agent_version, tools_version) tuples for agents currently used in contexts and cloudmanager agent from release.json""" - logger.info("Getting currently used agents from contexts") - agents = [] - - try: - release_data = load_current_release_json() - if not release_data: - logger.error("Could not load release.json") - return [] - - ops_manager_mapping = extract_ops_manager_mapping(release_data) - ops_manager_versions = ops_manager_mapping.get("ops_manager", {}) - - # Search all context files - context_pattern = "scripts/dev/contexts/**/*" - context_files = glob.glob(context_pattern, recursive=True) - - for context_file in context_files: - if os.path.isfile(context_file): - try: - with open(context_file, "r") as f: - content = f.read() - - # Extract AGENT_VERSION from the context file - for line in content.split("\n"): - if line.startswith("export AGENT_VERSION="): - agent_version = line.split("=")[1].strip() - tools_version = get_tools_version_for_agent(agent_version) - agents.append((agent_version, tools_version)) - logger.info(f"Found agent {agent_version} in {context_file}") - break - - # Extract CUSTOM_OM_VERSION and map to agent version - for line in content.split("\n"): - if line.startswith("export CUSTOM_OM_VERSION="): - om_version = line.split("=")[1].strip() - if om_version in ops_manager_versions: - agent_tools = ops_manager_versions[om_version] - agent_version = agent_tools.get("agent_version") - tools_version = agent_tools.get("tools_version") - if agent_version and tools_version: - agents.append((agent_version, tools_version)) - logger.info( - f"Found OM version {om_version} -> agent {agent_version} in {context_file}" - ) - break - - except Exception as e: - logger.debug(f"Error reading context file {context_file}: {e}") - - # Also add the cloudmanager agent from release.json - cloud_manager_agent = ops_manager_mapping.get("cloud_manager") - cloud_manager_tools = ops_manager_mapping.get("cloud_manager_tools") - if cloud_manager_agent and cloud_manager_tools: - agents.append((cloud_manager_agent, cloud_manager_tools)) - logger.info(f"Found cloudmanager agent from release.json: {cloud_manager_agent}") - - # Also add the main agentVersion from release.json - main_agent_version = release_data.get("agentVersion") - if main_agent_version: - tools_version = get_tools_version_for_agent(main_agent_version) - agents.append((main_agent_version, tools_version)) - logger.info(f"Found main agent version from release.json: {main_agent_version}") - - unique_agents = list(set(agents)) - logger.info(f"Found {len(unique_agents)} currently used agents") - return unique_agents - - except Exception as e: - logger.error(f"Error getting currently used agents: {e}") - return [] - - -def detect_ops_manager_changes() -> List[Tuple[str, str]]: - """Returns (has_changes, changed_agents_list)""" - logger.info("=== Detecting OM Mapping Changes (Local vs Base) ===") - - current_release = load_current_release_json() - if not current_release: - logger.error("Could not load current local release.json") - return [] - - master_release = load_release_json_from_master() - if not master_release: - logger.warning("Could not load base 
release.json, assuming changes exist") - return [] - - current_mapping = extract_ops_manager_mapping(current_release) - base_mapping = extract_ops_manager_mapping(master_release) - - if current_mapping != base_mapping: - return get_changed_agents(current_mapping, base_mapping) - else: - return [] diff --git a/scripts/release/agent/validation.py b/scripts/release/agent/validation.py index 792b01f43..d95f71824 100644 --- a/scripts/release/agent/validation.py +++ b/scripts/release/agent/validation.py @@ -1,74 +1,182 @@ import json import sys -from typing import Callable, List +from typing import Callable, Dict, List import requests from lib.base_logger import logger -def load_agent_build_info(): - """Load agent platform mappings from build_info_agent.json""" +def _load_agent_build_info(): with open("build_info_agent.json", "r") as f: return json.load(f) -def _validate_version_exists( +class PlatformConfiguration: + def __init__(self): + self._agent_info = None + + @property + def agent_info(self): + """Lazy load agent build info to avoid repeated file reads.""" + if self._agent_info is None: + self._agent_info = _load_agent_build_info() + return self._agent_info + + def generate_tools_build_args(self, platforms: List[str], tools_version: str) -> Dict[str, str]: + """ + Generate build arguments for MongoDB tools based on platform mappings. + Uses the same validation logic to ensure consistency. + + Args: + platforms: List of platforms (e.g., ["linux/amd64", "linux/arm64"]) + tools_version: MongoDB tools version + + Returns: + Dictionary of build arguments for docker build (tools only) + """ + build_args = {} + + for platform in platforms: + if platform not in self.agent_info["platforms"]: + logger.error(f"Platform {platform} not found in agent mappings, skipping") + continue + + arch = platform.split("/")[-1] + + # Use the same logic as validation to get the working tools filename + tools_filename = get_working_tools_filename(tools_version, platform) + if tools_filename: + build_args[f"mongodb_tools_version_{arch}"] = tools_filename + else: + logger.error(f"No working tools filename found for platform {platform}") + continue + + return build_args + + def generate_agent_build_args(self, platforms: List[str], agent_version: str, tools_version: str) -> Dict[str, str]: + """ + Generate build arguments for agent image based on platform mappings. 
+ + Args: + platforms: List of platforms (e.g., ["linux/amd64", "linux/arm64"]) + agent_version: MongoDB agent version + tools_version: MongoDB tools version + + Returns: + Dictionary of build arguments for docker build + """ + build_args = {} + + for platform in platforms: + if platform not in self.agent_info["platforms"]: + logger.warning(f"Platform {platform} not found in agent mappings, skipping") + continue + + arch = platform.split("/")[-1] + + agent_filename = get_working_agent_filename(agent_version, platform) + tools_filename = get_working_tools_filename(tools_version, platform) + + # Only add build args if we have valid filenames + if agent_filename and tools_filename: + build_args[f"mongodb_agent_version_{arch}"] = agent_filename + build_args[f"mongodb_tools_version_{arch}"] = tools_filename + logger.debug(f"Added build args for {platform}: agent={agent_filename}, tools={tools_filename}") + else: + logger.warning(f"Skipping build args for {platform} - missing agent or tools filename") + logger.debug(f" agent_filename: {agent_filename}") + logger.debug(f" tools_filename: {tools_filename}") + + return build_args + + +# Global instance for backward compatibility and ease of use +_platform_config = PlatformConfiguration() + + +def load_agent_build_info(): + """Load agent platform mappings from build_info_agent.json""" + return _platform_config.agent_info + + +def generate_tools_build_args(platforms: List[str], tools_version: str) -> Dict[str, str]: + """Generate build arguments for MongoDB tools based on platform mappings.""" + return _platform_config.generate_tools_build_args(platforms, tools_version) + + +def generate_agent_build_args(platforms: List[str], agent_version: str, tools_version: str) -> Dict[str, str]: + """Generate build arguments for agent image based on platform mappings.""" + return _platform_config.generate_agent_build_args(platforms, agent_version, tools_version) + + +def _build_agent_filenames(agent_info: dict, agent_version: str, platform: str) -> List[str]: + """Build all possible agent filenames for a given platform and version.""" + mapping = agent_info["platforms"][platform] + filenames = [] + for agent_suffix in mapping["agent_suffixes"]: + filename = f"{agent_info['base_names']['agent']}-{agent_version}.{agent_suffix}" + filenames.append(filename) + return filenames + + +def _build_tools_filenames(agent_info: dict, tools_version: str, platform: str) -> List[str]: + """Build all possible tools filenames for a given platform and version.""" + mapping = agent_info["platforms"][platform] + filenames = [] + for tools_suffix_template in mapping["tools_suffixes"]: + tools_suffix = tools_suffix_template.replace("{TOOLS_VERSION}", tools_version) + filenames.append(f"{agent_info['base_names']['tools']}-{tools_suffix}") + + return filenames + + +def _validate_url_exists(url: str, timeout: int = 30) -> bool: + try: + response = requests.head(url, timeout=timeout) + return response.status_code == 200 + except requests.RequestException: + return False + + +def _find_working_filename( version: str, - platforms: List[str], + platform: str, base_url: str, - filename_builder: Callable[[dict, str, str], str], + filenames_builder: Callable[[dict, str, str], List[str]], version_type: str, -) -> bool: +) -> str: """ - Generic validation function for checking if a version exists for all specified platforms. + Find the first working filename for a given platform and version. 
Args: - version: Version to validate - platforms: List of platforms to check + version: Version to check + platform: Platform to check base_url: Base URL for downloads - filename_builder: Function that builds filename from (agent_info, version, platform) + filenames_builder: Function that builds list of filenames from (agent_info, version, platform) version_type: Type of version being validated (for logging) - platform_not_found_action: Action when platform not found ("exit" or "continue") Returns: - True if version exists for all platforms, False otherwise + The working filename, or empty string if none work """ - agent_info = load_agent_build_info() - - for platform in platforms: - if platform not in agent_info["platform_mappings"]: - logger.error(f"Platform {platform} not found in agent mappings, skipping validation") - sys.exit(1) + agent_info = _platform_config.agent_info - filename = filename_builder(agent_info, version, platform) - url = f"{base_url}/{filename}" + if platform not in agent_info["platforms"]: + logger.warning(f"Platform {platform} not found in agent mappings") + return "" - try: - # Use HEAD request to check if URL exists without downloading the file - response = requests.head(url, timeout=30) - if response.status_code != 200: - logger.warning( - f"{version_type.title()} version {version} not found for platform {platform} at {url} (HTTP {response.status_code})" - ) - return False - logger.debug(f"{version_type.title()} version {version} validated for platform {platform}") - except requests.RequestException as e: - logger.warning(f"Failed to validate {version_type} version {version} for platform {platform}: {e}") - return False + filenames = filenames_builder(agent_info, version, platform) - logger.info(f"{version_type.title()} version {version} validated for all platforms: {platforms}") - return True + for filename in filenames: + url = f"{base_url}/{filename}" + if _validate_url_exists(url): + return filename + else: + logger.debug(f"{version_type.title()} version {version} not found for platform {platform} at {url}") -def _build_agent_filenames(agent_info: dict, agent_version: str, platform: str) -> List[str]: - """Build all possible agent filenames for a given platform and version.""" - mapping = agent_info["platform_mappings"][platform] - filenames = [] - for agent_suffix in mapping["agent_suffixes"]: - filename = f"{agent_info['base_names']['agent']}-{agent_version}.{agent_suffix}" - filenames.append(filename) - return filenames + logger.warning(f"No working {version_type} filename found for {platform}, platform will be skipped") + return "" def _get_available_platforms_with_fallback( @@ -92,41 +200,14 @@ def _get_available_platforms_with_fallback( Returns: List of platforms where the version exists """ - agent_info = load_agent_build_info() available_platforms = [] for platform in platforms: - if platform not in agent_info["platform_mappings"]: - logger.warning(f"Platform {platform} not found in agent mappings, skipping") - continue - - filenames = filenames_builder(agent_info, version, platform) - platform_found = False - - for filename in filenames: - url = f"{base_url}/{filename}" - - try: - response = requests.head(url, timeout=30) - if response.status_code == 200: - available_platforms.append(platform) - logger.debug( - f"{version_type.title()} version {version} available for platform {platform} using {filename}" - ) - platform_found = True - break - else: - logger.debug( - f"{version_type.title()} version {version} not found for platform {platform} at 
{url} (HTTP {response.status_code})" - ) - except requests.RequestException as e: - logger.debug( - f"Failed to validate {version_type} version {version} for platform {platform} at {url}: {e}" - ) - - if not platform_found: - logger.warning( - f"{version_type.title()} version {version} not found for platform {platform} (tried {len(filenames)} possibilities)" + working_filename = _find_working_filename(version, platform, base_url, filenames_builder, version_type) + if working_filename: + available_platforms.append(platform) + logger.debug( + f"{version_type.title()} version {version} available for platform {platform} using {working_filename}" ) return available_platforms @@ -157,23 +238,6 @@ def get_available_platforms_for_agent(agent_version: str, platforms: List[str]) ) -def _build_tools_filenames(agent_info: dict, tools_version: str, platform: str) -> List[str]: - """Build all possible tools filenames for a given platform and version.""" - mapping = agent_info["platform_mappings"][platform] - filenames = [] - - # Try the current tools suffix first - tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version) - filenames.append(f"{agent_info['base_names']['tools']}-{tools_suffix}") - - # Try the old tools suffix as fallback - if "tools_suffix_old" in mapping: - tools_suffix_old = mapping["tools_suffix_old"].replace("{TOOLS_VERSION}", tools_version) - filenames.append(f"{agent_info['base_names']['tools']}-{tools_suffix_old}") - - return filenames - - def get_working_agent_filename(agent_version: str, platform: str) -> str: """ Get the actual working agent filename for a specific platform and version. @@ -190,28 +254,7 @@ def get_working_agent_filename(agent_version: str, platform: str) -> str: "https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" ) - agent_info = load_agent_build_info() - - if platform not in agent_info["platform_mappings"]: - logger.warning(f"Platform {platform} not found in agent mappings") - return "" - - filenames = _build_agent_filenames(agent_info, agent_version, platform) - - for filename in filenames: - url = f"{agent_base_url}/{filename}" - - try: - response = requests.head(url, timeout=30) - if response.status_code == 200: - logger.debug(f"Found working agent filename for {platform}: {filename}") - return filename - except requests.RequestException: - continue - - # If none work, return empty string to indicate platform should be skipped - logger.warning(f"No working agent filename found for {platform}, platform will be skipped") - return "" + return _find_working_filename(agent_version, platform, agent_base_url, _build_agent_filenames, "agent") def get_working_tools_filename(tools_version: str, platform: str) -> str: @@ -228,28 +271,7 @@ def get_working_tools_filename(tools_version: str, platform: str) -> str: """ tools_base_url = "https://fastdl.mongodb.org/tools/db" - agent_info = load_agent_build_info() - - if platform not in agent_info["platform_mappings"]: - logger.warning(f"Platform {platform} not found in agent mappings") - return "" - - filenames = _build_tools_filenames(agent_info, tools_version, platform) - - for filename in filenames: - url = f"{tools_base_url}/{filename}" - - try: - response = requests.head(url, timeout=30) - if response.status_code == 200: - logger.debug(f"Found working tools filename for {platform}: {filename}") - return filename - except requests.RequestException: - continue - - # If none work, return empty string to indicate platform should be skipped - 
logger.warning(f"No working tools filename found for {platform}, platform will be skipped") - return "" + return _find_working_filename(tools_version, platform, tools_base_url, _build_tools_filenames, "tools") def get_available_platforms_for_tools(tools_version: str, platforms: List[str]) -> List[str]: diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 2b5b66939..8e1de3e34 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -5,7 +5,6 @@ import json import os import shutil -import sys from concurrent.futures import ProcessPoolExecutor from copy import copy from queue import Queue @@ -21,11 +20,10 @@ get_currently_used_agents, ) from scripts.release.agent.validation import ( + generate_agent_build_args, + generate_tools_build_args, get_available_platforms_for_agent, get_available_platforms_for_tools, - get_working_agent_filename, - get_working_tools_filename, - load_agent_build_info, ) from scripts.release.build.image_build_configuration import ImageBuildConfiguration from scripts.release.build.image_build_process import execute_docker_build @@ -53,74 +51,6 @@ def extract_tools_version_from_release(release: Dict) -> str: return tools_version -def generate_tools_build_args(platforms: List[str], tools_version: str) -> Dict[str, str]: - """ - Generate build arguments for MongoDB tools based on platform mappings. - - Args: - platforms: List of platforms (e.g., ["linux/amd64", "linux/arm64"]) - tools_version: MongoDB tools version - - Returns: - Dictionary of build arguments for docker build (tools only) - """ - agent_info = load_agent_build_info() - build_args = {} - - for platform in platforms: - if platform not in agent_info["platform_mappings"]: - logger.error(f"Platform {platform} not found in agent mappings, skipping") - sys.exit(1) - - mapping = agent_info["platform_mappings"][platform] - - arch = platform.split("/")[-1] - - tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version) - tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}" - build_args[f"mongodb_tools_version_{arch}"] = tools_filename - - return build_args - - -def generate_agent_build_args(platforms: List[str], agent_version: str, tools_version: str) -> Dict[str, str]: - """ - Generate build arguments for agent image based on platform mappings. 
-
-    Args:
-        platforms: List of platforms (e.g., ["linux/amd64", "linux/arm64"])
-        agent_version: MongoDB agent version
-        tools_version: MongoDB tools version
-
-    Returns:
-        Dictionary of build arguments for docker build
-    """
-    agent_info = load_agent_build_info()
-    build_args = {}
-
-    for platform in platforms:
-        if platform not in agent_info["platform_mappings"]:
-            logger.warning(f"Platform {platform} not found in agent mappings, skipping")
-            continue
-
-        arch = platform.split("/")[-1]
-
-        agent_filename = get_working_agent_filename(agent_version, platform)
-        tools_filename = get_working_tools_filename(tools_version, platform)
-
-        # Only add build args if we have valid filenames
-        if agent_filename and tools_filename:
-            build_args[f"mongodb_agent_version_{arch}"] = agent_filename
-            build_args[f"mongodb_tools_version_{arch}"] = tools_filename
-            logger.debug(f"Added build args for {platform}: agent={agent_filename}, tools={tools_filename}")
-        else:
-            logger.warning(f"Skipping build args for {platform} - missing agent or tools filename")
-            logger.debug(f"  agent_filename: {agent_filename}")
-            logger.debug(f"  tools_filename: {tools_filename}")
-
-    return build_args
-
-
 def build_image(
     build_configuration: ImageBuildConfiguration,
     build_args: Dict[str, str] = None,
@@ -430,13 +360,15 @@ def build_agent(build_configuration: ImageBuildConfiguration):
     for idx, agent_tools_version in enumerate(agent_versions_to_build):
         agent_version = agent_tools_version[0]
         tools_version = agent_tools_version[1]
-        logger.info(f"======= Building Agent {agent_tools_version} ({idx + 1}/{len(agent_versions_to_build)})")
 
         available_agent_platforms = get_available_platforms_for_agent(agent_version, build_configuration.platforms)
         available_tools_platforms = get_available_platforms_for_tools(tools_version, build_configuration.platforms)
-
         available_platforms = list(set(available_agent_platforms) & set(available_tools_platforms))
 
+        logger.info(
+            f"======= Building Agent {agent_tools_version} for platforms: {available_platforms} ({idx + 1}/{len(agent_versions_to_build)})"
+        )
+
         # Check if amd64 is available - if not, skip the entire build
         if "linux/amd64" not in available_platforms:
             logger.warning(
diff --git a/scripts/release/atomic_pipeline_test.py b/scripts/release/atomic_pipeline_test.py
index 4d6c09157..c56eb0c42 100644
--- a/scripts/release/atomic_pipeline_test.py
+++ b/scripts/release/atomic_pipeline_test.py
@@ -1,15 +1,26 @@
 #!/usr/bin/env python3
 """
-Test for agent build mapping functionality in atomic_pipeline.py
+Test for agent build mapping functionality and validation functions.
""" import json import unittest from unittest.mock import patch +from scripts.release.agent.validation import ( + PlatformConfiguration, + generate_agent_build_args, + generate_tools_build_args, + get_available_platforms_for_agent, + get_available_platforms_for_tools, + get_working_agent_filename, + get_working_tools_filename, + load_agent_build_info, +) -class TestAgentBuildMapping(unittest.TestCase): - """Test cases for agent build mapping functionality.""" + +class TestPlatformConfiguration(unittest.TestCase): + """Test cases for PlatformConfiguration class.""" def setUp(self): """Set up test fixtures.""" @@ -17,30 +28,167 @@ def setUp(self): with open("build_info_agent.json", "r") as f: self.agent_build_info = json.load(f) - def test_agent_version_validation(self): - """Test that agent version validation works correctly.""" - from scripts.release.agent.validation import ( - validate_agent_version_exists, - validate_tools_version_exists, - ) + def test_platform_configuration_initialization(self): + """Test that PlatformConfiguration initializes correctly.""" + config = PlatformConfiguration() + self.assertIsNotNone(config.agent_info) + self.assertIn("platforms", config.agent_info) + self.assertIn("base_names", config.agent_info) + + def test_load_agent_build_info(self): + """Test that load_agent_build_info returns correct structure.""" + agent_info = load_agent_build_info() + self.assertIn("platforms", agent_info) + self.assertIn("base_names", agent_info) + + # Check that expected platforms exist + expected_platforms = ["linux/amd64", "linux/arm64", "linux/s390x", "linux/ppc64le"] + for platform in expected_platforms: + self.assertIn(platform, agent_info["platforms"]) + + # Check base names + self.assertEqual(agent_info["base_names"]["agent"], "mongodb-mms-automation-agent") + self.assertEqual(agent_info["base_names"]["tools"], "mongodb-database-tools") + + +class TestBuildArgumentGeneration(unittest.TestCase): + """Test cases for build argument generation functions.""" + + def setUp(self): + """Set up test fixtures.""" + self.platforms = ["linux/amd64", "linux/arm64"] + self.tools_version = "100.9.5" + self.agent_version = "13.5.2.7785" + + @patch("scripts.release.agent.validation._validate_url_exists") + def test_generate_tools_build_args(self, mock_validate): + """Test tools build args generation.""" + # Mock URL validation to return True for tools + mock_validate.return_value = True + + build_args = generate_tools_build_args(self.platforms, self.tools_version) + + # Check that build args are generated for each platform + self.assertIn("mongodb_tools_version_amd64", build_args) + self.assertIn("mongodb_tools_version_arm64", build_args) + + # Check that filenames contain the version + self.assertIn(self.tools_version, build_args["mongodb_tools_version_amd64"]) + self.assertIn(self.tools_version, build_args["mongodb_tools_version_arm64"]) + + @patch("scripts.release.agent.validation._validate_url_exists") + def test_generate_agent_build_args(self, mock_validate): + """Test agent build args generation.""" + # Mock URL validation to return True for both agent and tools + mock_validate.return_value = True + + build_args = generate_agent_build_args(self.platforms, self.agent_version, self.tools_version) + + # Check that build args are generated for each platform + self.assertIn("mongodb_agent_version_amd64", build_args) + self.assertIn("mongodb_tools_version_amd64", build_args) + self.assertIn("mongodb_agent_version_arm64", build_args) + self.assertIn("mongodb_tools_version_arm64", 
build_args) + + +class TestValidationFunctions(unittest.TestCase): + """Test cases for validation and filename functions.""" + + def setUp(self): + """Set up test fixtures.""" + self.platform = "linux/amd64" + self.platforms = ["linux/amd64", "linux/arm64"] + self.tools_version = "100.9.5" + self.agent_version = "13.5.2.7785" - platforms = ["linux/amd64"] - # Test with a known good agent version (this should exist) - good_agent_version = "108.0.12.8846-1" - self.assertTrue(validate_agent_version_exists(good_agent_version, platforms)) +class TestPlatformAvailability(unittest.TestCase): + """Test cases for platform availability functions.""" - # Test with a known bad agent version (this should not exist) - bad_agent_version = "12.0.33.7866-1" - self.assertFalse(validate_agent_version_exists(bad_agent_version, platforms)) + def setUp(self): + """Set up test fixtures.""" + self.platforms = ["linux/amd64", "linux/arm64"] + self.tools_version = "100.9.5" + self.agent_version = "13.5.2.7785" + + @patch("scripts.release.agent.validation._validate_url_exists") + def test_get_available_platforms_for_tools(self, mock_validate): + """Test getting available platforms for tools.""" + # Mock URL validation to return True for amd64, False for arm64 + mock_validate.side_effect = [False, True, False, False] # Try current and old suffixes for each platform + + available_platforms = get_available_platforms_for_tools(self.tools_version, self.platforms) + + self.assertIsInstance(available_platforms, list) + self.assertIn("linux/amd64", available_platforms) + self.assertNotIn("linux/arm64", available_platforms) + + @patch("scripts.release.agent.validation._validate_url_exists") + def test_get_available_platforms_for_agent(self, mock_validate): + """Test getting available platforms for agent.""" + # Mock URL validation to return True for amd64, False for arm64 + mock_validate.side_effect = [True, False] # One call per platform + + available_platforms = get_available_platforms_for_agent(self.agent_version, self.platforms) + + self.assertIsInstance(available_platforms, list) + self.assertIn("linux/amd64", available_platforms) + self.assertNotIn("linux/arm64", available_platforms) + + @patch("scripts.release.agent.validation._validate_url_exists") + def test_get_working_agent_filename(self, mock_validate): + """Test getting working agent filename.""" + mock_validate.return_value = True + + filename = get_working_agent_filename(self.agent_version, "linux/amd64") + + self.assertIsInstance(filename, str) + if filename: # Only check if filename is found + self.assertIn(self.agent_version, filename) + self.assertIn("mongodb-mms-automation-agent", filename) + + @patch("scripts.release.agent.validation._validate_url_exists") + def test_get_working_tools_filename(self, mock_validate): + """Test getting working tools filename.""" + mock_validate.return_value = True + + filename = get_working_tools_filename(self.tools_version, "linux/amd64") + + self.assertIsInstance(filename, str) + if filename: # Only check if filename is found + self.assertIn(self.tools_version, filename) + self.assertIn("mongodb-database-tools", filename) + + def test_get_working_filename_invalid_platform(self): + """Test getting working filename with invalid platform.""" + filename = get_working_agent_filename(self.agent_version, "invalid/platform") + self.assertEqual(filename, "") + + filename = get_working_tools_filename(self.tools_version, "invalid/platform") + self.assertEqual(filename, "") + + +class TestIntegration(unittest.TestCase): + 
"""Integration tests that test the actual functions.""" + + @patch("scripts.release.agent.validation._validate_url_exists") + def test_end_to_end_build_args_generation(self, mock_validate): + """Test end-to-end build args generation as used in atomic_pipeline.""" + # Mock URL validation to return True + mock_validate.return_value = True + + platforms = ["linux/amd64"] + tools_version = "100.9.5" + agent_version = "13.5.2.7785" - # Test with a known good tools version (this should exist) - good_tools_version = "100.12.2" - self.assertTrue(validate_tools_version_exists(good_tools_version, platforms)) + # Test tools build args + tools_args = generate_tools_build_args(platforms, tools_version) + self.assertIn("mongodb_tools_version_amd64", tools_args) - # Test with a known bad tools version (this should not exist) - bad_tools_version = "999.99.99" - self.assertFalse(validate_tools_version_exists(bad_tools_version, platforms)) + # Test agent build args + agent_args = generate_agent_build_args(platforms, agent_version, tools_version) + self.assertIn("mongodb_agent_version_amd64", agent_args) + self.assertIn("mongodb_tools_version_amd64", agent_args) if __name__ == "__main__": diff --git a/scripts/release/tests/build_info_test.py b/scripts/release/tests/build_info_test.py index bef97173d..4dac69b68 100644 --- a/scripts/release/tests/build_info_test.py +++ b/scripts/release/tests/build_info_test.py @@ -246,7 +246,7 @@ def test_load_build_info_staging(git_repo: Repo): images={ "operator": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes", - platforms=["linux/arm64", "linux/amd64"], + platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=expected_commit_sha, dockerfile_path="docker/mongodb-kubernetes-operator/Dockerfile.atomic", sign=True, @@ -260,28 +260,28 @@ def test_load_build_info_staging(git_repo: Repo): ), "init-database": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-database", - platforms=["linux/arm64", "linux/amd64"], + platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=expected_commit_sha, dockerfile_path="docker/mongodb-kubernetes-init-database/Dockerfile.atomic", sign=True, ), "init-appdb": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-appdb", - platforms=["linux/arm64", "linux/amd64"], + platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=expected_commit_sha, dockerfile_path="docker/mongodb-kubernetes-init-appdb/Dockerfile.atomic", sign=True, ), "init-ops-manager": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-ops-manager", - platforms=["linux/arm64", "linux/amd64"], + platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=expected_commit_sha, dockerfile_path="docker/mongodb-kubernetes-init-ops-manager/Dockerfile.atomic", sign=True, ), "database": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-database", - platforms=["linux/arm64", "linux/amd64"], + platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=expected_commit_sha, dockerfile_path="docker/mongodb-kubernetes-database/Dockerfile.atomic", sign=True, @@ -295,28 +295,28 @@ def test_load_build_info_staging(git_repo: Repo): ), "meko-tests": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-tests", 
- platforms=["linux/amd64"], + platforms=["linux/arm64", "linux/amd64"], version=expected_commit_sha, dockerfile_path="docker/mongodb-kubernetes-tests/Dockerfile", sign=False, ), "readiness-probe": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-readinessprobe", - platforms=["linux/arm64", "linux/amd64"], + platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=expected_commit_sha, dockerfile_path="docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic", sign=True, ), "upgrade-hook": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-operator-version-upgrade-post-start-hook", - platforms=["linux/arm64", "linux/amd64"], + platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=expected_commit_sha, dockerfile_path="docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic", sign=True, ), "agent": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-agent-ubi", - platforms=["linux/arm64", "linux/amd64"], + platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=expected_commit_sha, dockerfile_path="docker/mongodb-agent/Dockerfile.atomic", sign=True, @@ -361,56 +361,56 @@ def test_load_build_info_release( images={ "operator": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes", - platforms=["linux/arm64", "linux/amd64"], + platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=version, dockerfile_path="docker/mongodb-kubernetes-operator/Dockerfile.atomic", sign=True, ), "init-database": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes-init-database", - platforms=["linux/arm64", "linux/amd64"], + platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=version, dockerfile_path="docker/mongodb-kubernetes-init-database/Dockerfile.atomic", sign=True, ), "init-appdb": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes-init-appdb", - platforms=["linux/arm64", "linux/amd64"], + platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=version, dockerfile_path="docker/mongodb-kubernetes-init-appdb/Dockerfile.atomic", sign=True, ), "init-ops-manager": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes-init-ops-manager", - platforms=["linux/arm64", "linux/amd64"], + platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=version, dockerfile_path="docker/mongodb-kubernetes-init-ops-manager/Dockerfile.atomic", sign=True, ), "database": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes-database", - platforms=["linux/arm64", "linux/amd64"], + platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=version, dockerfile_path="docker/mongodb-kubernetes-database/Dockerfile.atomic", sign=True, ), "readiness-probe": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes-readinessprobe", - platforms=["linux/arm64", "linux/amd64"], + platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=readinessprobe_version, dockerfile_path="docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic", sign=True, ), "upgrade-hook": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook", - platforms=["linux/arm64", "linux/amd64"], + platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=operator_version_upgrade_post_start_hook_version, 
dockerfile_path="docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic", sign=True, ), "agent": ImageInfo( repository="quay.io/mongodb/mongodb-agent-ubi", - platforms=["linux/arm64", "linux/amd64"], + platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=version, dockerfile_path="docker/mongodb-agent/Dockerfile.atomic", sign=True, diff --git a/scripts/release/tests/release_info_test.py b/scripts/release/tests/release_info_test.py index 213f5d8e6..cb0bd8e28 100644 --- a/scripts/release/tests/release_info_test.py +++ b/scripts/release/tests/release_info_test.py @@ -15,37 +15,37 @@ def test_create_release_info_json( "images": { "operator": { "repository": "quay.io/mongodb/mongodb-kubernetes", - "platforms": ["linux/arm64", "linux/amd64"], + "platforms": ["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], "version": "1.2.0", }, "init-database": { "repository": "quay.io/mongodb/mongodb-kubernetes-init-database", - "platforms": ["linux/arm64", "linux/amd64"], + "platforms": ["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], "version": "1.2.0", }, "init-appdb": { "repository": "quay.io/mongodb/mongodb-kubernetes-init-appdb", - "platforms": ["linux/arm64", "linux/amd64"], + "platforms": ["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], "version": "1.2.0", }, "init-ops-manager": { "repository": "quay.io/mongodb/mongodb-kubernetes-init-ops-manager", - "platforms": ["linux/arm64", "linux/amd64"], + "platforms": ["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], "version": "1.2.0", }, "database": { "repository": "quay.io/mongodb/mongodb-kubernetes-database", - "platforms": ["linux/arm64", "linux/amd64"], + "platforms": ["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], "version": "1.2.0", }, "readiness-probe": { "repository": "quay.io/mongodb/mongodb-kubernetes-readinessprobe", - "platforms": ["linux/arm64", "linux/amd64"], + "platforms": ["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], "version": readinessprobe_version, }, "upgrade-hook": { "repository": "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook", - "platforms": ["linux/arm64", "linux/amd64"], + "platforms": ["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], "version": operator_version_upgrade_post_start_hook_version, }, }, diff --git a/scripts/release/tests/test_detect_ops_manager_changes.py b/scripts/release/tests/test_detect_ops_manager_changes.py index af8c173a3..63b283b75 100644 --- a/scripts/release/tests/test_detect_ops_manager_changes.py +++ b/scripts/release/tests/test_detect_ops_manager_changes.py @@ -221,46 +221,6 @@ def test_cloud_manager_tools_changed(self): changed_agents = detect_ops_manager_changes() self.assertIn(("13.37.0.9590-1", "100.13.0"), changed_agents) - def test_cloud_manager_downgrade_not_detected(self): - """Test that cloud manager downgrade is NOT detected""" - modified_current = json.loads(json.dumps(self.current_release_data)) - # Downgrade from 13.37.0.9590-1 to 13.36.0.9500-1 - modified_current["supportedImages"]["mongodb-agent"]["opsManagerMapping"]["cloud_manager"] = "13.36.0.9500-1" - - with ( - patch( - "scripts.release.agent.detect_ops_manager_changes.load_current_release_json", - return_value=modified_current, - ), - patch( - "scripts.release.agent.detect_ops_manager_changes.load_release_json_from_master", - return_value=self.master_release_data, - ), - ): - - changed_agents = detect_ops_manager_changes() - # Downgrade should NOT be detected - 
self.assertEqual(changed_agents, []) - - def test_ops_manager_version_removed(self): - """Test detection when OM version is removed""" - modified_current = json.loads(json.dumps(self.current_release_data)) - del modified_current["supportedImages"]["mongodb-agent"]["opsManagerMapping"]["ops_manager"]["7.0.11"] - - with ( - patch( - "scripts.release.agent.detect_ops_manager_changes.load_current_release_json", - return_value=modified_current, - ), - patch( - "scripts.release.agent.detect_ops_manager_changes.load_release_json_from_master", - return_value=self.master_release_data, - ), - ): - - changed_agents = detect_ops_manager_changes() - self.assertEqual(changed_agents, []) - def test_both_om_and_cm_changed(self): """Test detection when both OM version and cloud manager are changed""" modified_current = json.loads(json.dumps(self.current_release_data)) From 67b3992815c651403e25375a207ac34dd1dbc3f2 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Wed, 20 Aug 2025 17:14:18 +0200 Subject: [PATCH 150/164] migrate repo from mongodb/mongodb-agent-ubi to mongodb/mongodb-agent --- build_info.json | 2 +- .../operator/appdbreplicaset_controller.go | 2 +- .../appdbreplicaset_controller_test.go | 12 +- .../mongodbmultireplicaset_controller_test.go | 2 +- .../mongodbopsmanager_controller_test.go | 2 +- .../mongodbreplicaset_controller_test.go | 4 +- .../mongodbshardedcluster_controller_test.go | 2 +- .../mongodbstandalone_controller_test.go | 2 +- .../output/0100_install_operator.out | 6 +- generate_ssdlc_report.py | 2 +- inventories/agent.yaml | 2 +- .../config/manager/manager.yaml | 2 +- .../deploy/openshift/operator_openshift.yaml | 2 +- .../replica_set_operator_upgrade_test.go | 2 +- .../test/e2e/setup/test_config.go | 2 +- pkg/util/architectures/static.go | 2 +- .../output/0210_helm_install_operator.out | 6 +- .../release/1.2.0.clusterserviceversion.yaml | 390 +++++++++--------- scripts/dev/release/backup_csv_images.py | 6 +- scripts/dev/release/test_backup_script.py | 14 +- scripts/release/tests/build_info_test.py | 2 +- 21 files changed, 233 insertions(+), 233 deletions(-) diff --git a/build_info.json b/build_info.json index bead3e240..e0507f387 100644 --- a/build_info.json +++ b/build_info.json @@ -267,7 +267,7 @@ }, "release": { "sign": true, - "repository": "quay.io/mongodb/mongodb-agent-ubi", + "repository": "quay.io/mongodb/mongodb-agent", "platforms": [ "linux/arm64", "linux/amd64", diff --git a/controllers/operator/appdbreplicaset_controller.go b/controllers/operator/appdbreplicaset_controller.go index 16436be0c..071ba8336 100644 --- a/controllers/operator/appdbreplicaset_controller.go +++ b/controllers/operator/appdbreplicaset_controller.go @@ -594,7 +594,7 @@ func (r *ReconcileAppDbReplicaSet) ReconcileAppDB(ctx context.Context, opsManage appdbOpts.LegacyMonitoringAgentImage = images.ContainerImage(r.imageUrls, mcoConstruct.AgentImageEnv, legacyMonitoringAgentVersion) - // AgentImageEnv contains the full container image uri e.g. quay.io/mongodb/mongodb-agent-ubi:107.0.0.8502-1 + // AgentImageEnv contains the full container image uri e.g. quay.io/mongodb/mongodb-agent:107.0.0.8502-1 // In non-static containers we don't ask OM for the correct version, therefore we just rely on the provided // environment variable. 
appdbOpts.AgentImage = r.imageUrls[mcoConstruct.AgentImageEnv] diff --git a/controllers/operator/appdbreplicaset_controller_test.go b/controllers/operator/appdbreplicaset_controller_test.go index 8559cf824..a7aa6f99f 100644 --- a/controllers/operator/appdbreplicaset_controller_test.go +++ b/controllers/operator/appdbreplicaset_controller_test.go @@ -442,7 +442,7 @@ func TestTryConfigureMonitoringInOpsManagerWithCustomTemplate(t *testing.T) { Containers: []corev1.Container{ { Name: "mongodb-agent", - Image: "quay.io/mongodb/mongodb-agent-ubi:10", + Image: "quay.io/mongodb/mongodb-agent:10", }, { Name: "mongod", @@ -450,7 +450,7 @@ func TestTryConfigureMonitoringInOpsManagerWithCustomTemplate(t *testing.T) { }, { Name: "mongodb-agent-monitoring", - Image: "quay.io/mongodb/mongodb-agent-ubi:20", + Image: "quay.io/mongodb/mongodb-agent:20", }, }, }, @@ -466,7 +466,7 @@ func TestTryConfigureMonitoringInOpsManagerWithCustomTemplate(t *testing.T) { foundImages := 0 for _, c := range appDbSts.Spec.Template.Spec.Containers { if c.Name == "mongodb-agent" { - assert.Equal(t, "quay.io/mongodb/mongodb-agent-ubi:10", c.Image) + assert.Equal(t, "quay.io/mongodb/mongodb-agent:10", c.Image) foundImages += 1 } if c.Name == "mongod" { @@ -474,7 +474,7 @@ func TestTryConfigureMonitoringInOpsManagerWithCustomTemplate(t *testing.T) { foundImages += 1 } if c.Name == "mongodb-agent-monitoring" { - assert.Equal(t, "quay.io/mongodb/mongodb-agent-ubi:20", c.Image) + assert.Equal(t, "quay.io/mongodb/mongodb-agent:20", c.Image) foundImages += 1 } } @@ -492,7 +492,7 @@ func TestTryConfigureMonitoringInOpsManagerWithCustomTemplate(t *testing.T) { foundImages := 0 for _, c := range appDbSts.Spec.Template.Spec.Containers { if c.Name == "mongodb-agent" { - assert.Equal(t, "quay.io/mongodb/mongodb-agent-ubi:10", c.Image) + assert.Equal(t, "quay.io/mongodb/mongodb-agent:10", c.Image) foundImages += 1 } if c.Name == "mongod" { @@ -500,7 +500,7 @@ func TestTryConfigureMonitoringInOpsManagerWithCustomTemplate(t *testing.T) { foundImages += 1 } if c.Name == "mongodb-agent-monitoring" { - assert.Equal(t, "quay.io/mongodb/mongodb-agent-ubi:20", c.Image) + assert.Equal(t, "quay.io/mongodb/mongodb-agent:20", c.Image) foundImages += 1 } } diff --git a/controllers/operator/mongodbmultireplicaset_controller_test.go b/controllers/operator/mongodbmultireplicaset_controller_test.go index 21a6b1c99..9766d7c7a 100644 --- a/controllers/operator/mongodbmultireplicaset_controller_test.go +++ b/controllers/operator/mongodbmultireplicaset_controller_test.go @@ -138,7 +138,7 @@ func TestMultiReplicaSetClusterReconcileContainerImagesWithStaticArchitecture(t databaseRelatedImageEnv := fmt.Sprintf("RELATED_IMAGE_%s_8_0_0_ubi9", mcoConstruct.MongodbImageEnv) imageUrlsMock := images.ImageUrls{ - architectures.MdbAgentImageRepo: "quay.io/mongodb/mongodb-agent-ubi", + architectures.MdbAgentImageRepo: "quay.io/mongodb/mongodb-agent", mcoConstruct.MongodbImageEnv: "quay.io/mongodb/mongodb-enterprise-server", databaseRelatedImageEnv: "quay.io/mongodb/mongodb-enterprise-server:@sha256:MONGODB_DATABASE", } diff --git a/controllers/operator/mongodbopsmanager_controller_test.go b/controllers/operator/mongodbopsmanager_controller_test.go index 07291dabb..2631f69c9 100644 --- a/controllers/operator/mongodbopsmanager_controller_test.go +++ b/controllers/operator/mongodbopsmanager_controller_test.go @@ -551,7 +551,7 @@ func TestOpsManagerReconcileContainerImagesWithStaticArchitecture(t *testing.T) // AppDB images mongodbRelatedImageEnv: 
"quay.io/mongodb/mongodb-enterprise-appdb-database-ubi@sha256:MONGODB_SHA", - architectures.MdbAgentImageRepo: "quay.io/mongodb/mongodb-agent-ubi", + architectures.MdbAgentImageRepo: "quay.io/mongodb/mongodb-agent", } ctx := context.Background() diff --git a/controllers/operator/mongodbreplicaset_controller_test.go b/controllers/operator/mongodbreplicaset_controller_test.go index 64c3f724c..f6aea38c8 100644 --- a/controllers/operator/mongodbreplicaset_controller_test.go +++ b/controllers/operator/mongodbreplicaset_controller_test.go @@ -129,7 +129,7 @@ func TestReplicaSetClusterReconcileContainerImagesWithStaticArchitecture(t *test databaseRelatedImageEnv := fmt.Sprintf("RELATED_IMAGE_%s_8_0_0_ubi9", mcoConstruct.MongodbImageEnv) imageUrlsMock := images.ImageUrls{ - architectures.MdbAgentImageRepo: "quay.io/mongodb/mongodb-agent-ubi", + architectures.MdbAgentImageRepo: "quay.io/mongodb/mongodb-agent", mcoConstruct.MongodbImageEnv: "quay.io/mongodb/mongodb-enterprise-server", databaseRelatedImageEnv: "quay.io/mongodb/mongodb-enterprise-server:@sha256:MONGODB_DATABASE", } @@ -157,7 +157,7 @@ func TestReplicaSetClusterReconcileContainerImagesWithStaticArchitecture(t *test func VerifyStaticContainers(t *testing.T, containers []corev1.Container) { agentContainerImage := findContainerImage(containers, util.AgentContainerName) require.NotNil(t, agentContainerImage, "Agent container not found") - assert.Equal(t, "quay.io/mongodb/mongodb-agent-ubi:12.0.30.7791-1", agentContainerImage) + assert.Equal(t, "quay.io/mongodb/mongodb-agent:12.0.30.7791-1", agentContainerImage) mongoContainerImage := findContainerImage(containers, util.DatabaseContainerName) require.NotNil(t, mongoContainerImage, "MongoDB container not found") diff --git a/controllers/operator/mongodbshardedcluster_controller_test.go b/controllers/operator/mongodbshardedcluster_controller_test.go index 3fc1547e4..12ca849b8 100644 --- a/controllers/operator/mongodbshardedcluster_controller_test.go +++ b/controllers/operator/mongodbshardedcluster_controller_test.go @@ -294,7 +294,7 @@ func TestShardedClusterReconcileContainerImagesWithStaticArchitecture(t *testing sc := test.DefaultClusterBuilder().SetVersion("8.0.0").SetShardCountSpec(1).Build() imageUrlsMock := images.ImageUrls{ - architectures.MdbAgentImageRepo: "quay.io/mongodb/mongodb-agent-ubi", + architectures.MdbAgentImageRepo: "quay.io/mongodb/mongodb-agent", mcoConstruct.MongodbImageEnv: "quay.io/mongodb/mongodb-enterprise-server", databaseRelatedImageEnv: "quay.io/mongodb/mongodb-enterprise-server:@sha256:MONGODB_DATABASE", } diff --git a/controllers/operator/mongodbstandalone_controller_test.go b/controllers/operator/mongodbstandalone_controller_test.go index f6438ed68..69bdfbb87 100644 --- a/controllers/operator/mongodbstandalone_controller_test.go +++ b/controllers/operator/mongodbstandalone_controller_test.go @@ -109,7 +109,7 @@ func TestStandaloneClusterReconcileContainerImagesWithStaticArchitecture(t *test databaseRelatedImageEnv := fmt.Sprintf("RELATED_IMAGE_%s_8_0_0_ubi9", mcoConstruct.MongodbImageEnv) imageUrlsMock := images.ImageUrls{ - architectures.MdbAgentImageRepo: "quay.io/mongodb/mongodb-agent-ubi", + architectures.MdbAgentImageRepo: "quay.io/mongodb/mongodb-agent", mcoConstruct.MongodbImageEnv: "quay.io/mongodb/mongodb-enterprise-server", databaseRelatedImageEnv: "quay.io/mongodb/mongodb-enterprise-server:@sha256:MONGODB_DATABASE", } diff --git a/docs/community-search/quick-start/output/0100_install_operator.out 
b/docs/community-search/quick-start/output/0100_install_operator.out index 035e24e8a..5ca0af2c3 100644 --- a/docs/community-search/quick-start/output/0100_install_operator.out +++ b/docs/community-search/quick-start/output/0100_install_operator.out @@ -510,9 +510,9 @@ spec: - name: OPS_MANAGER_IMAGE_PULL_POLICY value: Always - name: AGENT_IMAGE - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.2.8729-1" + value: "quay.io/mongodb/mongodb-agent:108.0.2.8729-1" - name: MDB_AGENT_IMAGE_REPOSITORY - value: "quay.io/mongodb/mongodb-agent-ubi" + value: "quay.io/mongodb/mongodb-agent" - name: MONGODB_IMAGE value: mongodb-enterprise-server - name: MONGODB_REPO_URL @@ -531,7 +531,7 @@ spec: value: mongodb-kubernetes-operator # Community Env Vars Start - name: MDB_COMMUNITY_AGENT_IMAGE - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.2.8729-1" + value: "quay.io/mongodb/mongodb-agent:108.0.2.8729-1" - name: VERSION_UPGRADE_HOOK_IMAGE value: "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.9" - name: READINESS_PROBE_IMAGE diff --git a/generate_ssdlc_report.py b/generate_ssdlc_report.py index 89183d033..ed3b60696 100755 --- a/generate_ssdlc_report.py +++ b/generate_ssdlc_report.py @@ -98,7 +98,7 @@ def get_supported_images(release: Dict) -> dict[str, SupportedImage]: supported_images["mongodb-agent-ubi"] = SupportedImage( get_supported_version_for_image("mongodb-agent"), "mongodb-agent-ubi", - "quay.io/mongodb/mongodb-agent-ubi", + "quay.io/mongodb/mongodb-agent", release["supportedImages"]["mongodb-agent"]["ssdlc_name"], list(), # Once MCK supports both architectures, this should be re-enabled. diff --git a/inventories/agent.yaml b/inventories/agent.yaml index bcb2d5889..6601c7363 100644 --- a/inventories/agent.yaml +++ b/inventories/agent.yaml @@ -1,5 +1,5 @@ vars: - quay_registry: quay.io/mongodb/mongodb-agent-ubi + quay_registry: quay.io/mongodb/mongodb-agent s3_bucket: s3://enterprise-operator-dockerfiles/dockerfiles/mongodb-agent images: diff --git a/mongodb-community-operator/config/manager/manager.yaml b/mongodb-community-operator/config/manager/manager.yaml index 9013a8451..4f64da811 100644 --- a/mongodb-community-operator/config/manager/manager.yaml +++ b/mongodb-community-operator/config/manager/manager.yaml @@ -45,7 +45,7 @@ spec: - name: OPERATOR_NAME value: mongodb-kubernetes-operator - name: AGENT_IMAGE - value: quay.io/mongodb/mongodb-agent-ubi:108.0.2.8729-1 + value: quay.io/mongodb/mongodb-agent:108.0.2.8729-1 - name: VERSION_UPGRADE_HOOK_IMAGE value: quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.9 - name: READINESS_PROBE_IMAGE diff --git a/mongodb-community-operator/deploy/openshift/operator_openshift.yaml b/mongodb-community-operator/deploy/openshift/operator_openshift.yaml index c0f3cf1ce..557074846 100644 --- a/mongodb-community-operator/deploy/openshift/operator_openshift.yaml +++ b/mongodb-community-operator/deploy/openshift/operator_openshift.yaml @@ -47,7 +47,7 @@ spec: - name: OPERATOR_NAME value: mongodb-kubernetes-operator - name: AGENT_IMAGE - value: quay.io/mongodb/mongodb-agent-ubi:108.0.2.8729-1 + value: quay.io/mongodb/mongodb-agent:108.0.2.8729-1 - name: READINESS_PROBE_IMAGE value: quay.io/mongodb/mongodb-kubernetes-readinessprobe:1.0.22 - name: VERSION_UPGRADE_HOOK_IMAGE diff --git a/mongodb-community-operator/test/e2e/replica_set_operator_upgrade/replica_set_operator_upgrade_test.go b/mongodb-community-operator/test/e2e/replica_set_operator_upgrade/replica_set_operator_upgrade_test.go index 
0625d70f5..98ee9f385 100644 --- a/mongodb-community-operator/test/e2e/replica_set_operator_upgrade/replica_set_operator_upgrade_test.go +++ b/mongodb-community-operator/test/e2e/replica_set_operator_upgrade/replica_set_operator_upgrade_test.go @@ -163,7 +163,7 @@ func TestReplicaSetOperatorUpgradeFrom0_7_2(t *testing.T) { testConfig.OperatorVersion = "0.7.2" testConfig.VersionUpgradeHookImage = "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.3" testConfig.ReadinessProbeImage = "quay.io/mongodb/mongodb-kubernetes-readinessprobe:1.0.6" - testConfig.AgentImage = "quay.io/mongodb/mongodb-agent-ubi:11.0.5.6963-1" + testConfig.AgentImage = "quay.io/mongodb/mongodb-agent:11.0.5.6963-1" testCtx := setup.SetupWithTestConfig(ctx, t, testConfig, true, false, resourceName) defer testCtx.Teardown() diff --git a/mongodb-community-operator/test/e2e/setup/test_config.go b/mongodb-community-operator/test/e2e/setup/test_config.go index 57fd71d3e..399155a12 100644 --- a/mongodb-community-operator/test/e2e/setup/test_config.go +++ b/mongodb-community-operator/test/e2e/setup/test_config.go @@ -48,7 +48,7 @@ func LoadTestConfigFromEnv() TestConfig { MongoDBRepoUrl: envvar.GetEnvOrDefault(construct.MongodbRepoUrlEnv, "quay.io/mongodb"), // nolint:forbidigo VersionUpgradeHookImage: envvar.GetEnvOrDefault(construct.VersionUpgradeHookImageEnv, "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.2"), // nolint:forbidigo // TODO: MCK better way to decide default agent image. - AgentImage: envvar.GetEnvOrDefault("MDB_COMMUNITY_AGENT_IMAGE", "quay.io/mongodb/mongodb-agent-ubi:108.0.2.8729-1"), // nolint:forbidigo + AgentImage: envvar.GetEnvOrDefault("MDB_COMMUNITY_AGENT_IMAGE", "quay.io/mongodb/mongodb-agent:108.0.2.8729-1"), // nolint:forbidigo ClusterWide: envvar.ReadBool(clusterWideEnvName), // nolint:forbidigo PerformCleanup: envvar.ReadBool(performCleanupEnvName), // nolint:forbidigo ReadinessProbeImage: envvar.GetEnvOrDefault(construct.ReadinessProbeImageEnv, "quay.io/mongodb/mongodb-kubernetes-readinessprobe:1.0.3"), // nolint:forbidigo diff --git a/pkg/util/architectures/static.go b/pkg/util/architectures/static.go index dc398dabe..40d7f177a 100644 --- a/pkg/util/architectures/static.go +++ b/pkg/util/architectures/static.go @@ -41,7 +41,7 @@ const ( MdbAssumeEnterpriseImage = "MDB_ASSUME_ENTERPRISE_IMAGE" // MdbAgentImageRepo contains the repository containing the agent image for the database MdbAgentImageRepo = "MDB_AGENT_IMAGE_REPOSITORY" - MdbAgentImageRepoDefault = "quay.io/mongodb/mongodb-agent-ubi" + MdbAgentImageRepoDefault = "quay.io/mongodb/mongodb-agent" ) // IsRunningStaticArchitecture checks whether the operator is running in static or non-static mode. 
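For readers tracing the migration: as the two hunks above show, the operator resolves the agent repository from the MDB_AGENT_IMAGE_REPOSITORY environment variable and only falls back to the new quay.io/mongodb/mongodb-agent default when no override is set. A minimal Go sketch of that lookup (resolveAgentImage and the main function are illustrative, not code from this patch):

package main

import (
	"fmt"
	"os"
)

// Mirrors MdbAgentImageRepoDefault from pkg/util/architectures/static.go after the migration.
const mdbAgentImageRepoDefault = "quay.io/mongodb/mongodb-agent"

// resolveAgentImage is an illustrative helper, not part of this patch: an explicit
// MDB_AGENT_IMAGE_REPOSITORY override wins, otherwise the new default repository applies.
func resolveAgentImage(agentVersion string) string {
	repo := os.Getenv("MDB_AGENT_IMAGE_REPOSITORY")
	if repo == "" {
		repo = mdbAgentImageRepoDefault
	}
	return repo + ":" + agentVersion
}

func main() {
	// Prints "quay.io/mongodb/mongodb-agent:108.0.2.8729-1" when no override is set.
	fmt.Println(resolveAgentImage("108.0.2.8729-1"))
}

Whatever tag or digest is appended to the resolved repository is unaffected by the rename, which is why the remaining hunks in this patch only rewrite repository strings.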
diff --git a/public/architectures/setup-multi-cluster/setup-operator/output/0210_helm_install_operator.out b/public/architectures/setup-multi-cluster/setup-operator/output/0210_helm_install_operator.out index cc5174432..20f59b746 100644 --- a/public/architectures/setup-multi-cluster/setup-operator/output/0210_helm_install_operator.out +++ b/public/architectures/setup-multi-cluster/setup-operator/output/0210_helm_install_operator.out @@ -331,9 +331,9 @@ spec: - name: OPS_MANAGER_IMAGE_PULL_POLICY value: Always - name: AGENT_IMAGE - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.2.8729-1" + value: "quay.io/mongodb/mongodb-agent:108.0.2.8729-1" - name: MDB_AGENT_IMAGE_REPOSITORY - value: "quay.io/mongodb/mongodb-agent-ubi" + value: "quay.io/mongodb/mongodb-agent" - name: MONGODB_IMAGE value: mongodb-enterprise-server - name: MONGODB_REPO_URL @@ -352,7 +352,7 @@ spec: value: mongodb-kubernetes-operator-multi-cluster # Community Env Vars Start - name: MDB_COMMUNITY_AGENT_IMAGE - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.2.8729-1" + value: "quay.io/mongodb/mongodb-agent:108.0.2.8729-1" - name: VERSION_UPGRADE_HOOK_IMAGE value: "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.9" - name: READINESS_PROBE_IMAGE diff --git a/scripts/dev/release/1.2.0.clusterserviceversion.yaml b/scripts/dev/release/1.2.0.clusterserviceversion.yaml index 6618e522d..a8cbc7ce2 100644 --- a/scripts/dev/release/1.2.0.clusterserviceversion.yaml +++ b/scripts/dev/release/1.2.0.clusterserviceversion.yaml @@ -676,9 +676,9 @@ spec: - name: OPS_MANAGER_IMAGE_PULL_POLICY value: Always - name: AGENT_IMAGE - value: quay.io/mongodb/mongodb-agent-ubi@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 + value: quay.io/mongodb/mongodb-agent@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 - name: MDB_AGENT_IMAGE_REPOSITORY - value: quay.io/mongodb/mongodb-agent-ubi + value: quay.io/mongodb/mongodb-agent - name: MONGODB_IMAGE value: mongodb-enterprise-server - name: MONGODB_REPO_URL @@ -698,7 +698,7 @@ spec: - name: OPERATOR_NAME value: mongodb-kubernetes-operator - name: MDB_COMMUNITY_AGENT_IMAGE - value: quay.io/mongodb/mongodb-agent-ubi@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 + value: quay.io/mongodb/mongodb-agent@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 - name: VERSION_UPGRADE_HOOK_IMAGE value: quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.9 - name: READINESS_PROBE_IMAGE @@ -718,133 +718,133 @@ spec: - name: RELATED_IMAGE_INIT_APPDB_IMAGE_REPOSITORY_1_2_0 value: quay.io/mongodb/mongodb-kubernetes-init-appdb@sha256:2230112283c5ab3a7f8d9b37d9f98dd9e13960f8b3eff467366c0382bcf7e3fd - name: RELATED_IMAGE_AGENT_IMAGE_107_0_10_8627_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:55ad1aac025933b741dbf2b2bf87baa36979fc2c2d7f68bfb737f8bc897612b8 + value: quay.io/mongodb/mongodb-agent@sha256:55ad1aac025933b741dbf2b2bf87baa36979fc2c2d7f68bfb737f8bc897612b8 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_10_8627_1_1_0_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:9a9575cb7fdaa649139180f78ba05b5c0fe00c7f7cab40a840aa87d88f379cae + value: quay.io/mongodb/mongodb-agent@sha256:9a9575cb7fdaa649139180f78ba05b5c0fe00c7f7cab40a840aa87d88f379cae - name: RELATED_IMAGE_AGENT_IMAGE_107_0_10_8627_1_1_1_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:b17ea63780448ccbc6fb6e92c4f919e1bc3bdcbb33c1fdd6f969a55f111dcc07 + value: 
quay.io/mongodb/mongodb-agent@sha256:b17ea63780448ccbc6fb6e92c4f919e1bc3bdcbb33c1fdd6f969a55f111dcc07 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_10_8627_1_1_2_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:983db0e0b65396d0b8e8c397ab681ed0b28c4e8d922b25b9d105bd5eb920addf + value: quay.io/mongodb/mongodb-agent@sha256:983db0e0b65396d0b8e8c397ab681ed0b28c4e8d922b25b9d105bd5eb920addf - name: RELATED_IMAGE_AGENT_IMAGE_107_0_11_8645_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:71f681014fd1bf8113637e0f403381f4ba1c40b0a4869199d1563bb91334a846 + value: quay.io/mongodb/mongodb-agent@sha256:71f681014fd1bf8113637e0f403381f4ba1c40b0a4869199d1563bb91334a846 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_11_8645_1_1_0_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:9e04f2087703049ec12a2ee6e058737f93fd146c1512a63b50a7b1bfc2f1e631 + value: quay.io/mongodb/mongodb-agent@sha256:9e04f2087703049ec12a2ee6e058737f93fd146c1512a63b50a7b1bfc2f1e631 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_11_8645_1_1_1_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:97511a99fda4b0f28c98e6eac6b5b87b1a2890b0b92fba6fbb56ab4f4fabca13 + value: quay.io/mongodb/mongodb-agent@sha256:97511a99fda4b0f28c98e6eac6b5b87b1a2890b0b92fba6fbb56ab4f4fabca13 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_11_8645_1_1_2_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:68829b6f61b377640a0bf417eaa64974e8d702cfb2d59fb70506dcdd153ddde5 + value: quay.io/mongodb/mongodb-agent@sha256:68829b6f61b377640a0bf417eaa64974e8d702cfb2d59fb70506dcdd153ddde5 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_12_8669_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:73d39ecfa709fafafd7c32be17226989282b8637600320242ab1b76593538f6d + value: quay.io/mongodb/mongodb-agent@sha256:73d39ecfa709fafafd7c32be17226989282b8637600320242ab1b76593538f6d - name: RELATED_IMAGE_AGENT_IMAGE_107_0_12_8669_1_1_0_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:723b1816f2d41424f77c92c8b761b5bada027057feec9d9bf1acaf570e25cd4f + value: quay.io/mongodb/mongodb-agent@sha256:723b1816f2d41424f77c92c8b761b5bada027057feec9d9bf1acaf570e25cd4f - name: RELATED_IMAGE_AGENT_IMAGE_107_0_12_8669_1_1_1_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:b29677ccc177a3b3e71f664bd25f3196a67d35157c1b01ba5de527a84f6e2516 + value: quay.io/mongodb/mongodb-agent@sha256:b29677ccc177a3b3e71f664bd25f3196a67d35157c1b01ba5de527a84f6e2516 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_12_8669_1_1_2_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:2501709c5f245d212b703d1be92847f2d9c3c5193725767a0237f760de7830e4 + value: quay.io/mongodb/mongodb-agent@sha256:2501709c5f245d212b703d1be92847f2d9c3c5193725767a0237f760de7830e4 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_13_8702_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:b8dfbb85a47c7647b96783819083e4aedf6aaadcdb7202beb174052356fa6b72 + value: quay.io/mongodb/mongodb-agent@sha256:b8dfbb85a47c7647b96783819083e4aedf6aaadcdb7202beb174052356fa6b72 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_13_8702_1_1_0_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:cee210ac2ec4b31e03e93aeb3c4c881f96076ad8cc972772d4c2f367502d23d9 + value: quay.io/mongodb/mongodb-agent@sha256:cee210ac2ec4b31e03e93aeb3c4c881f96076ad8cc972772d4c2f367502d23d9 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_13_8702_1_1_1_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:d13c3daebf5f0bdf3dfdbf611f02fe89b323be20002a390491a91f3fb6b000e8 + value: quay.io/mongodb/mongodb-agent@sha256:d13c3daebf5f0bdf3dfdbf611f02fe89b323be20002a390491a91f3fb6b000e8 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_13_8702_1_1_2_0 - value: 
quay.io/mongodb/mongodb-agent-ubi@sha256:6c08ec6352215cda558cfb37357b4b8faf63adc45ca22f585ae89651233717d6 + value: quay.io/mongodb/mongodb-agent@sha256:6c08ec6352215cda558cfb37357b4b8faf63adc45ca22f585ae89651233717d6 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_15_8741_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:875049c8947d6181de1a1f45db699d297f986885837119438e111d97ca059074 + value: quay.io/mongodb/mongodb-agent@sha256:875049c8947d6181de1a1f45db699d297f986885837119438e111d97ca059074 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_15_8741_1_1_0_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:569759bf8dfe2b597fac2d6ac78617ceacc135c7f823c3dce50c73e35b312c1c + value: quay.io/mongodb/mongodb-agent@sha256:569759bf8dfe2b597fac2d6ac78617ceacc135c7f823c3dce50c73e35b312c1c - name: RELATED_IMAGE_AGENT_IMAGE_107_0_15_8741_1_1_1_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:802efc032909aff521e2d9a667a371f6fbf579247f0bdfa18377eedb003f06a0 + value: quay.io/mongodb/mongodb-agent@sha256:802efc032909aff521e2d9a667a371f6fbf579247f0bdfa18377eedb003f06a0 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_15_8741_1_1_2_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:0b48d8201e132bb82dd615c471cea0eef9376db3329c309049442efd3c6ce2e7 + value: quay.io/mongodb/mongodb-agent@sha256:0b48d8201e132bb82dd615c471cea0eef9376db3329c309049442efd3c6ce2e7 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_0_8694_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:4d0a1403ec874b49004eb99ba0e077b01ee721932ebf8c172881dc9a8e03e295 + value: quay.io/mongodb/mongodb-agent@sha256:4d0a1403ec874b49004eb99ba0e077b01ee721932ebf8c172881dc9a8e03e295 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_0_8694_1_1_0_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:bcbc5058eef2bd2226efa5e0dcf34d9c97c30dc452e8e9394c8435603a50e6ba + value: quay.io/mongodb/mongodb-agent@sha256:bcbc5058eef2bd2226efa5e0dcf34d9c97c30dc452e8e9394c8435603a50e6ba - name: RELATED_IMAGE_AGENT_IMAGE_108_0_0_8694_1_1_1_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:21862e6964962d414a7b3780372685fd94bc9ecd042a21ddf63d26a302a298ac + value: quay.io/mongodb/mongodb-agent@sha256:21862e6964962d414a7b3780372685fd94bc9ecd042a21ddf63d26a302a298ac - name: RELATED_IMAGE_AGENT_IMAGE_108_0_0_8694_1_1_2_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:fe71c534c4b8442e103c13655e7209d261496bed414f93681bfdcf84902772c5 + value: quay.io/mongodb/mongodb-agent@sha256:fe71c534c4b8442e103c13655e7209d261496bed414f93681bfdcf84902772c5 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_1_8718_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:5ce68f13f72ae3b9786840f8bf1071b839f484f874f84c64368275fdacdac3d3 + value: quay.io/mongodb/mongodb-agent@sha256:5ce68f13f72ae3b9786840f8bf1071b839f484f874f84c64368275fdacdac3d3 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_1_8718_1_1_0_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:288f37db36b6126fbe2ae09acd4752eb2428489ad40947503370a8e470db0bc1 + value: quay.io/mongodb/mongodb-agent@sha256:288f37db36b6126fbe2ae09acd4752eb2428489ad40947503370a8e470db0bc1 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_1_8718_1_1_1_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:288cd702221820fdeb1df5a0c31fb83bd625de32a3736ca5fbac1dee1a136f7d + value: quay.io/mongodb/mongodb-agent@sha256:288cd702221820fdeb1df5a0c31fb83bd625de32a3736ca5fbac1dee1a136f7d - name: RELATED_IMAGE_AGENT_IMAGE_108_0_1_8718_1_1_2_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:a65cd79ea5416140373edbf1ae51b8995269099307676d2d3c69f889d3bc93fd + value: 
quay.io/mongodb/mongodb-agent@sha256:a65cd79ea5416140373edbf1ae51b8995269099307676d2d3c69f889d3bc93fd - name: RELATED_IMAGE_AGENT_IMAGE_108_0_2_8729_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 + value: quay.io/mongodb/mongodb-agent@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_2_8729_1_1_0_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:0ff66dd59a9d542e650f6c3d6788f82c9fc21fd758ec022bd9f876a989251a86 + value: quay.io/mongodb/mongodb-agent@sha256:0ff66dd59a9d542e650f6c3d6788f82c9fc21fd758ec022bd9f876a989251a86 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_2_8729_1_1_1_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:4da6f4885e79f8343087afe54642dc92816386b454ed061c6c495d0be2122cbe + value: quay.io/mongodb/mongodb-agent@sha256:4da6f4885e79f8343087afe54642dc92816386b454ed061c6c495d0be2122cbe - name: RELATED_IMAGE_AGENT_IMAGE_108_0_2_8729_1_1_2_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:596712619d1b115aba9d645d0391f2e76fae307a2c3df1f9ca19f1bbde3b7f4d + value: quay.io/mongodb/mongodb-agent@sha256:596712619d1b115aba9d645d0391f2e76fae307a2c3df1f9ca19f1bbde3b7f4d - name: RELATED_IMAGE_AGENT_IMAGE_108_0_3_8758_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:fda1518e08568a39fefef073e8d5eba117e029ca14bbf2442fd8664a1452790c + value: quay.io/mongodb/mongodb-agent@sha256:fda1518e08568a39fefef073e8d5eba117e029ca14bbf2442fd8664a1452790c - name: RELATED_IMAGE_AGENT_IMAGE_108_0_3_8758_1_1_0_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:7b08de82504e9c183f5a83e0a29d73eb5ef3a688c3defe84ffcc562be762f834 + value: quay.io/mongodb/mongodb-agent@sha256:7b08de82504e9c183f5a83e0a29d73eb5ef3a688c3defe84ffcc562be762f834 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_3_8758_1_1_1_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:bff84d4b4eac87c22a11a3d3925fe1f3d17f99fa3d8b24ca452d584757a6a372 + value: quay.io/mongodb/mongodb-agent@sha256:bff84d4b4eac87c22a11a3d3925fe1f3d17f99fa3d8b24ca452d584757a6a372 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_3_8758_1_1_2_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:60b0659e4d248df28af4882df29f847934887654edba24d6b30d4dbb9f4ed1d0 + value: quay.io/mongodb/mongodb-agent@sha256:60b0659e4d248df28af4882df29f847934887654edba24d6b30d4dbb9f4ed1d0 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_4_8770_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:3b7389074fc8a3563adf311b58bee910700518b0b25a1bf05a5c94ad3050e1c6 + value: quay.io/mongodb/mongodb-agent@sha256:3b7389074fc8a3563adf311b58bee910700518b0b25a1bf05a5c94ad3050e1c6 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_4_8770_1_1_0_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:7569c3e38b39b16596ea1c53798d2d3c7dcc711d06e5a72d4646eabd36ce13f9 + value: quay.io/mongodb/mongodb-agent@sha256:7569c3e38b39b16596ea1c53798d2d3c7dcc711d06e5a72d4646eabd36ce13f9 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_4_8770_1_1_1_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:432462c265801d531327a19689c1c668e7ee5972d235ffdd3d910125d628f6d1 + value: quay.io/mongodb/mongodb-agent@sha256:432462c265801d531327a19689c1c668e7ee5972d235ffdd3d910125d628f6d1 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_4_8770_1_1_2_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:805b4c1efe9bddaba4c712ead9d7327ae9b811683fd3c3bea7e2b70e42637821 + value: quay.io/mongodb/mongodb-agent@sha256:805b4c1efe9bddaba4c712ead9d7327ae9b811683fd3c3bea7e2b70e42637821 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_6_8796_1 - value: 
quay.io/mongodb/mongodb-agent-ubi@sha256:4487d7b1110d5efe3321c67e33ed648e525e5fce6d4810b7f1654291a9c21090 + value: quay.io/mongodb/mongodb-agent@sha256:4487d7b1110d5efe3321c67e33ed648e525e5fce6d4810b7f1654291a9c21090 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_6_8796_1_1_0_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:29e591f43586d9e0fb2d7deabfedbb276361e01708d556ed7b72d25d15eda63d + value: quay.io/mongodb/mongodb-agent@sha256:29e591f43586d9e0fb2d7deabfedbb276361e01708d556ed7b72d25d15eda63d - name: RELATED_IMAGE_AGENT_IMAGE_108_0_6_8796_1_1_1_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:e1804a7ba01f0b1fede6881803faf94c0d0d1c4d99b82dfcd7e2de870fae67d7 + value: quay.io/mongodb/mongodb-agent@sha256:e1804a7ba01f0b1fede6881803faf94c0d0d1c4d99b82dfcd7e2de870fae67d7 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_6_8796_1_1_2_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:e1b64329377ff9fc47f4a80f92c54cd9654e69a6aec24218f08bd8d0b06fc7cc + value: quay.io/mongodb/mongodb-agent@sha256:e1b64329377ff9fc47f4a80f92c54cd9654e69a6aec24218f08bd8d0b06fc7cc - name: RELATED_IMAGE_AGENT_IMAGE_108_0_7_8810_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:c9639810f9457ad79cbf880a9ddeaf3bad557e2022b206ad54852d0cfc4395fe + value: quay.io/mongodb/mongodb-agent@sha256:c9639810f9457ad79cbf880a9ddeaf3bad557e2022b206ad54852d0cfc4395fe - name: RELATED_IMAGE_AGENT_IMAGE_108_0_7_8810_1_1_0_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:3b332aeb8dce3734fd0a345f6dd0c240ae96ca3aa9286e2b300fc55e4f16e54c + value: quay.io/mongodb/mongodb-agent@sha256:3b332aeb8dce3734fd0a345f6dd0c240ae96ca3aa9286e2b300fc55e4f16e54c - name: RELATED_IMAGE_AGENT_IMAGE_108_0_7_8810_1_1_1_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:04f6458c699ab2e5b8878c803784c486c34a8595deac65fa76a5de338e4780a3 + value: quay.io/mongodb/mongodb-agent@sha256:04f6458c699ab2e5b8878c803784c486c34a8595deac65fa76a5de338e4780a3 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_7_8810_1_1_2_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:78baa2581c7b181aae5b70cf1c78a78cfe4aa7ea31bbfe93ebd238fbaa58b6ee + value: quay.io/mongodb/mongodb-agent@sha256:78baa2581c7b181aae5b70cf1c78a78cfe4aa7ea31bbfe93ebd238fbaa58b6ee - name: RELATED_IMAGE_AGENT_IMAGE_12_0_33_7866_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:de19730c88c98bf8b6ad98ee175f783d0298e4eeecf3c3709fe2e0981c3ee6f7 + value: quay.io/mongodb/mongodb-agent@sha256:de19730c88c98bf8b6ad98ee175f783d0298e4eeecf3c3709fe2e0981c3ee6f7 - name: RELATED_IMAGE_AGENT_IMAGE_12_0_33_7866_1_1_0_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:593e11c50e8eca1e400c9da870f0359c81c28e63a6e1b1677e565bf5af36a5b3 + value: quay.io/mongodb/mongodb-agent@sha256:593e11c50e8eca1e400c9da870f0359c81c28e63a6e1b1677e565bf5af36a5b3 - name: RELATED_IMAGE_AGENT_IMAGE_12_0_33_7866_1_1_1_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:5236018c21a44cc0cb58151b80165ab7f51c390ae449123a99f59b217dd8893a + value: quay.io/mongodb/mongodb-agent@sha256:5236018c21a44cc0cb58151b80165ab7f51c390ae449123a99f59b217dd8893a - name: RELATED_IMAGE_AGENT_IMAGE_12_0_33_7866_1_1_2_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:41d4e5c94e3817e65cdcd4733185c80eeef00d231041f6da06b235ab61060ed0 + value: quay.io/mongodb/mongodb-agent@sha256:41d4e5c94e3817e65cdcd4733185c80eeef00d231041f6da06b235ab61060ed0 - name: RELATED_IMAGE_AGENT_IMAGE_12_0_34_7888_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:0217cc9262b9981cc8903d646d26c7bcafd397abc0584839812a19264b1cf6b1 + value: 
quay.io/mongodb/mongodb-agent@sha256:0217cc9262b9981cc8903d646d26c7bcafd397abc0584839812a19264b1cf6b1 - name: RELATED_IMAGE_AGENT_IMAGE_12_0_34_7888_1_1_0_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:f81b5a29542149241ea273946e1f78ffb75323653f3ee753902b26420b988252 + value: quay.io/mongodb/mongodb-agent@sha256:f81b5a29542149241ea273946e1f78ffb75323653f3ee753902b26420b988252 - name: RELATED_IMAGE_AGENT_IMAGE_12_0_34_7888_1_1_1_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:b8b3a9bec5d7b550a7dc7d152eeb151f68832d67707328debc6da27be53d2100 + value: quay.io/mongodb/mongodb-agent@sha256:b8b3a9bec5d7b550a7dc7d152eeb151f68832d67707328debc6da27be53d2100 - name: RELATED_IMAGE_AGENT_IMAGE_12_0_34_7888_1_1_2_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:406aabaef8a2e9822ea69a41c81ecf4d4f1f73ac6608dbffca59007032b600b3 + value: quay.io/mongodb/mongodb-agent@sha256:406aabaef8a2e9822ea69a41c81ecf4d4f1f73ac6608dbffca59007032b600b3 - name: RELATED_IMAGE_AGENT_IMAGE_12_0_35_7911_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:3bb3155da1c3700a25caffd55f1b63266fee9055732f32451728615758604a0c + value: quay.io/mongodb/mongodb-agent@sha256:3bb3155da1c3700a25caffd55f1b63266fee9055732f32451728615758604a0c - name: RELATED_IMAGE_AGENT_IMAGE_12_0_35_7911_1_1_0_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:c540ecf01791899208227a4f3141f2253be50346ffbc6ec847da2d63db90241c + value: quay.io/mongodb/mongodb-agent@sha256:c540ecf01791899208227a4f3141f2253be50346ffbc6ec847da2d63db90241c - name: RELATED_IMAGE_AGENT_IMAGE_12_0_35_7911_1_1_1_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:0b9f79fc01e0e4841e2b33f864f58c0a16e51be505f30f2aa5512af1bd832a14 + value: quay.io/mongodb/mongodb-agent@sha256:0b9f79fc01e0e4841e2b33f864f58c0a16e51be505f30f2aa5512af1bd832a14 - name: RELATED_IMAGE_AGENT_IMAGE_12_0_35_7911_1_1_2_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:2db9d3d6eb6ee6abba4d31be791b4f35d30076b7478c3d7aadf67560f5b5bba8 + value: quay.io/mongodb/mongodb-agent@sha256:2db9d3d6eb6ee6abba4d31be791b4f35d30076b7478c3d7aadf67560f5b5bba8 - name: RELATED_IMAGE_AGENT_IMAGE_13_36_0_9555_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:9ac472f3256b2a5e8ad1ad0237925449eb26f8c363314130ad876378af98d63c + value: quay.io/mongodb/mongodb-agent@sha256:9ac472f3256b2a5e8ad1ad0237925449eb26f8c363314130ad876378af98d63c - name: RELATED_IMAGE_AGENT_IMAGE_13_36_0_9555_1_1_0_1 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:9d958eb46314f28542ffe33e4d0027ba37681b1ebcc25c823e45fe3b3a145946 + value: quay.io/mongodb/mongodb-agent@sha256:9d958eb46314f28542ffe33e4d0027ba37681b1ebcc25c823e45fe3b3a145946 - name: RELATED_IMAGE_AGENT_IMAGE_13_36_0_9555_1_1_1_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:376389f5a7102142aa2556c5ae73712c343800f1e2b2193e00ebae3ad3a96cc0 + value: quay.io/mongodb/mongodb-agent@sha256:376389f5a7102142aa2556c5ae73712c343800f1e2b2193e00ebae3ad3a96cc0 - name: RELATED_IMAGE_AGENT_IMAGE_13_36_0_9555_1_1_2_0 - value: quay.io/mongodb/mongodb-agent-ubi@sha256:45255201acacf6a2ebaab4ff4b331ffed3b21af14cd23d868d425c464a63a6ad + value: quay.io/mongodb/mongodb-agent@sha256:45255201acacf6a2ebaab4ff4b331ffed3b21af14cd23d868d425c464a63a6ad - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_25 value: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:e424ff1ac407e75edadde07d2c0e7d76759b1ad96d57273a68386d3f6710dadf - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_26 @@ -1107,11 +1107,11 @@ spec: provider: name: MongoDB, Inc relatedImages: - - image: 
quay.io/mongodb/mongodb-agent-ubi@sha256:b17ea63780448ccbc6fb6e92c4f919e1bc3bdcbb33c1fdd6f969a55f111dcc07 + - image: quay.io/mongodb/mongodb-agent@sha256:b17ea63780448ccbc6fb6e92c4f919e1bc3bdcbb33c1fdd6f969a55f111dcc07 name: agent-image-107-0-10-8627-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:596712619d1b115aba9d645d0391f2e76fae307a2c3df1f9ca19f1bbde3b7f4d + - image: quay.io/mongodb/mongodb-agent@sha256:596712619d1b115aba9d645d0391f2e76fae307a2c3df1f9ca19f1bbde3b7f4d name: agent-image-108-0-2-8729-1-1-2-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 + - image: quay.io/mongodb/mongodb-agent@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 name: agent_image_108_0_2_8729_1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:b00ac773623e1bf422b47e6d783263d2d08737865abe9abbe106d86d1ea25165 name: mongodb_image_4_4_14_ubi8 @@ -1121,33 +1121,33 @@ spec: name: mongodb_image_5_0_10_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:df6081325ea2fe01d1b27d0c70005dec8232aaf51a43d91661b722b8a1761263 name: mongodb-image-5-0-6-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:0ff66dd59a9d542e650f6c3d6788f82c9fc21fd758ec022bd9f876a989251a86 + - image: quay.io/mongodb/mongodb-agent@sha256:0ff66dd59a9d542e650f6c3d6788f82c9fc21fd758ec022bd9f876a989251a86 name: agent-image-108-0-2-8729-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:0b9f79fc01e0e4841e2b33f864f58c0a16e51be505f30f2aa5512af1bd832a14 + - image: quay.io/mongodb/mongodb-agent@sha256:0b9f79fc01e0e4841e2b33f864f58c0a16e51be505f30f2aa5512af1bd832a14 name: agent_image_12_0_35_7911_1_1_1_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:9a3a6496be0d2d446e6d654eb56386c7726e0cd2d7232f6dcec579923edf0014 name: mongodb_image_4_4_4_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:11ab324698f919f84a5dab5a964d93fe63f98a48146bb3b86974cba7606d30ec name: mongodb_image_4_4_8_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:29e591f43586d9e0fb2d7deabfedbb276361e01708d556ed7b72d25d15eda63d + - image: quay.io/mongodb/mongodb-agent@sha256:29e591f43586d9e0fb2d7deabfedbb276361e01708d556ed7b72d25d15eda63d name: agent-image-108-0-6-8796-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:3b7389074fc8a3563adf311b58bee910700518b0b25a1bf05a5c94ad3050e1c6 + - image: quay.io/mongodb/mongodb-agent@sha256:3b7389074fc8a3563adf311b58bee910700518b0b25a1bf05a5c94ad3050e1c6 name: agent-image-108-0-4-8770-1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:037392b561f59674e70b9cde634afd5df09b02f9e61c18ba946d812701c69e2c name: mongodb-image-5-0-14-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:fe3e7635cbdc89b0015e509fe310aed1a5812180d03cc9d1816a87bfdbbc094f name: mongodb-image-5-0-17-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:9a9575cb7fdaa649139180f78ba05b5c0fe00c7f7cab40a840aa87d88f379cae + - image: quay.io/mongodb/mongodb-agent@sha256:9a9575cb7fdaa649139180f78ba05b5c0fe00c7f7cab40a840aa87d88f379cae name: agent_image_107_0_10_8627_1_1_0_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:569759bf8dfe2b597fac2d6ac78617ceacc135c7f823c3dce50c73e35b312c1c + - image: quay.io/mongodb/mongodb-agent@sha256:569759bf8dfe2b597fac2d6ac78617ceacc135c7f823c3dce50c73e35b312c1c name: agent-image-107-0-15-8741-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:78baa2581c7b181aae5b70cf1c78a78cfe4aa7ea31bbfe93ebd238fbaa58b6ee + - image: 
quay.io/mongodb/mongodb-agent@sha256:78baa2581c7b181aae5b70cf1c78a78cfe4aa7ea31bbfe93ebd238fbaa58b6ee name: agent-image-108-0-7-8810-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:33a30d11969eb21952681108092fe7f916e25c8e1de43e6a4e40e126025a2bec name: mongodb-image-4-4-10-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:376389f5a7102142aa2556c5ae73712c343800f1e2b2193e00ebae3ad3a96cc0 + - image: quay.io/mongodb/mongodb-agent@sha256:376389f5a7102142aa2556c5ae73712c343800f1e2b2193e00ebae3ad3a96cc0 name: agent_image_13_36_0_9555_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:71f681014fd1bf8113637e0f403381f4ba1c40b0a4869199d1563bb91334a846 + - image: quay.io/mongodb/mongodb-agent@sha256:71f681014fd1bf8113637e0f403381f4ba1c40b0a4869199d1563bb91334a846 name: agent-image-107-0-11-8645-1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:5545696bf7e536c0030f5f5682be2fcdde5213603095ba05ad937e3e43dcc03d name: ops-manager-image-repository-8-0-4 @@ -1155,15 +1155,15 @@ spec: name: ops_manager_image_repository_8_0_2 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:dfd4277f0c16ed476ac7de51894739e8c8d13a65ea5024b9a97b3dce0f6c6975 name: mongodb_image_4_4_13_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:5236018c21a44cc0cb58151b80165ab7f51c390ae449123a99f59b217dd8893a + - image: quay.io/mongodb/mongodb-agent@sha256:5236018c21a44cc0cb58151b80165ab7f51c390ae449123a99f59b217dd8893a name: agent-image-12-0-33-7866-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:b8b3a9bec5d7b550a7dc7d152eeb151f68832d67707328debc6da27be53d2100 + - image: quay.io/mongodb/mongodb-agent@sha256:b8b3a9bec5d7b550a7dc7d152eeb151f68832d67707328debc6da27be53d2100 name: agent-image-12-0-34-7888-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:875049c8947d6181de1a1f45db699d297f986885837119438e111d97ca059074 + - image: quay.io/mongodb/mongodb-agent@sha256:875049c8947d6181de1a1f45db699d297f986885837119438e111d97ca059074 name: agent_image_107_0_15_8741_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:4da6f4885e79f8343087afe54642dc92816386b454ed061c6c495d0be2122cbe + - image: quay.io/mongodb/mongodb-agent@sha256:4da6f4885e79f8343087afe54642dc92816386b454ed061c6c495d0be2122cbe name: agent_image_108_0_2_8729_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:bff84d4b4eac87c22a11a3d3925fe1f3d17f99fa3d8b24ca452d584757a6a372 + - image: quay.io/mongodb/mongodb-agent@sha256:bff84d4b4eac87c22a11a3d3925fe1f3d17f99fa3d8b24ca452d584757a6a372 name: agent_image_108_0_3_8758_1_1_1_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:3da86ad85be8588c70ad1ec0bd22ce0e063ad2085d6a7373e7956e1999782ac2 name: mongodb_image_5_0_9_ubi8 @@ -1171,11 +1171,11 @@ spec: name: mdb_search_image_1_47_0 - image: quay.io/mongodb/mongodb-kubernetes-init-appdb@sha256:2230112283c5ab3a7f8d9b37d9f98dd9e13960f8b3eff467366c0382bcf7e3fd name: init-appdb-image-repository-1-2-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:9a9575cb7fdaa649139180f78ba05b5c0fe00c7f7cab40a840aa87d88f379cae + - image: quay.io/mongodb/mongodb-agent@sha256:9a9575cb7fdaa649139180f78ba05b5c0fe00c7f7cab40a840aa87d88f379cae name: agent-image-107-0-10-8627-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:fe71c534c4b8442e103c13655e7209d261496bed414f93681bfdcf84902772c5 + - image: quay.io/mongodb/mongodb-agent@sha256:fe71c534c4b8442e103c13655e7209d261496bed414f93681bfdcf84902772c5 name: agent-image-108-0-0-8694-1-1-2-0 - - image: 
quay.io/mongodb/mongodb-agent-ubi@sha256:60b0659e4d248df28af4882df29f847934887654edba24d6b30d4dbb9f4ed1d0 + - image: quay.io/mongodb/mongodb-agent@sha256:60b0659e4d248df28af4882df29f847934887654edba24d6b30d4dbb9f4ed1d0 name: agent-image-108-0-3-8758-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:e424ff1ac407e75edadde07d2c0e7d76759b1ad96d57273a68386d3f6710dadf name: ops-manager-image-repository-6-0-25 @@ -1183,11 +1183,11 @@ spec: name: mongodb-image-5-0-3-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:fad91b02e0b484a9903879c5a851c78fce1af5ce84254907b70c4d9b1bd47970 name: mongodb-image-5-0-15-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:723b1816f2d41424f77c92c8b761b5bada027057feec9d9bf1acaf570e25cd4f + - image: quay.io/mongodb/mongodb-agent@sha256:723b1816f2d41424f77c92c8b761b5bada027057feec9d9bf1acaf570e25cd4f name: agent-image-107-0-12-8669-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:3bb3155da1c3700a25caffd55f1b63266fee9055732f32451728615758604a0c + - image: quay.io/mongodb/mongodb-agent@sha256:3bb3155da1c3700a25caffd55f1b63266fee9055732f32451728615758604a0c name: agent-image-12-0-35-7911-1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:5ce68f13f72ae3b9786840f8bf1071b839f484f874f84c64368275fdacdac3d3 + - image: quay.io/mongodb/mongodb-agent@sha256:5ce68f13f72ae3b9786840f8bf1071b839f484f874f84c64368275fdacdac3d3 name: agent_image_108_0_1_8718_1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:09f8e1052e572fa2c54611109b237cdd834f69b43412ec2deecab3a7427e0a78 name: ops_manager_image_repository_6_0_27 @@ -1199,41 +1199,41 @@ spec: name: mongodb-image-5-0-0-ubi8 - image: quay.io/mongodb/mongodb-kubernetes-init-ops-manager@sha256:c07c502e38e5106558da1f1d210b311164ababd55a9bfd07f4a19e488156b32c name: init_ops_manager_image_repository_1_2_0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:2501709c5f245d212b703d1be92847f2d9c3c5193725767a0237f760de7830e4 + - image: quay.io/mongodb/mongodb-agent@sha256:2501709c5f245d212b703d1be92847f2d9c3c5193725767a0237f760de7830e4 name: agent_image_107_0_12_8669_1_1_2_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:19925e1297ab652db59c35f53e7b7c8b2b277f8ecdc4215682961aaf2b3e924e name: mongodb_image_4_4_0_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:8445d59d491a6d3890ea9c8b421214bcd0fdcfbc420ed40497a790b79f0bc89e name: mongodb_image_4_4_17_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:fda1518e08568a39fefef073e8d5eba117e029ca14bbf2442fd8664a1452790c + - image: quay.io/mongodb/mongodb-agent@sha256:fda1518e08568a39fefef073e8d5eba117e029ca14bbf2442fd8664a1452790c name: agent-image-108-0-3-8758-1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:4487d7b1110d5efe3321c67e33ed648e525e5fce6d4810b7f1654291a9c21090 + - image: quay.io/mongodb/mongodb-agent@sha256:4487d7b1110d5efe3321c67e33ed648e525e5fce6d4810b7f1654291a9c21090 name: agent-image-108-0-6-8796-1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:0b9f79fc01e0e4841e2b33f864f58c0a16e51be505f30f2aa5512af1bd832a14 + - image: quay.io/mongodb/mongodb-agent@sha256:0b9f79fc01e0e4841e2b33f864f58c0a16e51be505f30f2aa5512af1bd832a14 name: agent-image-12-0-35-7911-1-1-1-0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:d65a63011d54cee08f8fa1e1bb608707070d98e5f86e3a455345c996c7e53743 name: ops-manager-image-repository-8-0-1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:11ab324698f919f84a5dab5a964d93fe63f98a48146bb3b86974cba7606d30ec name: 
mongodb-image-4-4-8-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:68829b6f61b377640a0bf417eaa64974e8d702cfb2d59fb70506dcdd153ddde5 + - image: quay.io/mongodb/mongodb-agent@sha256:68829b6f61b377640a0bf417eaa64974e8d702cfb2d59fb70506dcdd153ddde5 name: agent_image_107_0_11_8645_1_1_2_0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:29e591f43586d9e0fb2d7deabfedbb276361e01708d556ed7b72d25d15eda63d + - image: quay.io/mongodb/mongodb-agent@sha256:29e591f43586d9e0fb2d7deabfedbb276361e01708d556ed7b72d25d15eda63d name: agent_image_108_0_6_8796_1_1_0_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:bcbc5058eef2bd2226efa5e0dcf34d9c97c30dc452e8e9394c8435603a50e6ba + - image: quay.io/mongodb/mongodb-agent@sha256:bcbc5058eef2bd2226efa5e0dcf34d9c97c30dc452e8e9394c8435603a50e6ba name: agent-image-108-0-0-8694-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:7569c3e38b39b16596ea1c53798d2d3c7dcc711d06e5a72d4646eabd36ce13f9 + - image: quay.io/mongodb/mongodb-agent@sha256:7569c3e38b39b16596ea1c53798d2d3c7dcc711d06e5a72d4646eabd36ce13f9 name: agent_image_108_0_4_8770_1_1_0_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:4487d7b1110d5efe3321c67e33ed648e525e5fce6d4810b7f1654291a9c21090 + - image: quay.io/mongodb/mongodb-agent@sha256:4487d7b1110d5efe3321c67e33ed648e525e5fce6d4810b7f1654291a9c21090 name: agent_image_108_0_6_8796_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:593e11c50e8eca1e400c9da870f0359c81c28e63a6e1b1677e565bf5af36a5b3 + - image: quay.io/mongodb/mongodb-agent@sha256:593e11c50e8eca1e400c9da870f0359c81c28e63a6e1b1677e565bf5af36a5b3 name: agent_image_12_0_33_7866_1_1_0_1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:862ecb37018830c81c35bf03825ecb17cca93c58f6d591bf6bd713c089636fe7 name: ops_manager_image_repository_7_0_11 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:0fde236fe26d5b20210d6f246aadacd7845874712def516cbe2eec11d79d5181 name: mongodb_image_4_4_5_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:21862e6964962d414a7b3780372685fd94bc9ecd042a21ddf63d26a302a298ac + - image: quay.io/mongodb/mongodb-agent@sha256:21862e6964962d414a7b3780372685fd94bc9ecd042a21ddf63d26a302a298ac name: agent-image-108-0-0-8694-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:bff84d4b4eac87c22a11a3d3925fe1f3d17f99fa3d8b24ca452d584757a6a372 + - image: quay.io/mongodb/mongodb-agent@sha256:bff84d4b4eac87c22a11a3d3925fe1f3d17f99fa3d8b24ca452d584757a6a372 name: agent-image-108-0-3-8758-1-1-1-0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:f2d9a3264a8814f33143b73c56624572fb80afeab184133d3a901713a31c1a87 name: mongodb_image_5_0_4_ubi8 @@ -1245,41 +1245,41 @@ spec: name: mongodb_enterprise_database_image_1_2_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:2bcdb6e94e4436d84950eb1eea40cd5a7f6d33624b9f5f9f40a13e427d8fca9c name: mongodb_image_6_0_0_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:406aabaef8a2e9822ea69a41c81ecf4d4f1f73ac6608dbffca59007032b600b3 + - image: quay.io/mongodb/mongodb-agent@sha256:406aabaef8a2e9822ea69a41c81ecf4d4f1f73ac6608dbffca59007032b600b3 name: agent_image_12_0_34_7888_1_1_2_0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:983db0e0b65396d0b8e8c397ab681ed0b28c4e8d922b25b9d105bd5eb920addf + - image: quay.io/mongodb/mongodb-agent@sha256:983db0e0b65396d0b8e8c397ab681ed0b28c4e8d922b25b9d105bd5eb920addf name: agent-image-107-0-10-8627-1-1-2-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:4da6f4885e79f8343087afe54642dc92816386b454ed061c6c495d0be2122cbe + - image: 
quay.io/mongodb/mongodb-agent@sha256:4da6f4885e79f8343087afe54642dc92816386b454ed061c6c495d0be2122cbe name: agent-image-108-0-2-8729-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:e1b64329377ff9fc47f4a80f92c54cd9654e69a6aec24218f08bd8d0b06fc7cc + - image: quay.io/mongodb/mongodb-agent@sha256:e1b64329377ff9fc47f4a80f92c54cd9654e69a6aec24218f08bd8d0b06fc7cc name: agent-image-108-0-6-8796-1-1-2-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:0217cc9262b9981cc8903d646d26c7bcafd397abc0584839812a19264b1cf6b1 + - image: quay.io/mongodb/mongodb-agent@sha256:0217cc9262b9981cc8903d646d26c7bcafd397abc0584839812a19264b1cf6b1 name: agent-image-12-0-34-7888-1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:2bcdb6e94e4436d84950eb1eea40cd5a7f6d33624b9f5f9f40a13e427d8fca9c name: mongodb-image-6-0-0-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:1d0caf8dbe8796046f92d98541f152cded564f5a0d8e2b3af53d48aa6e3b9793 name: mongodb-image-8-0-0-ubi9 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:55ad1aac025933b741dbf2b2bf87baa36979fc2c2d7f68bfb737f8bc897612b8 + - image: quay.io/mongodb/mongodb-agent@sha256:55ad1aac025933b741dbf2b2bf87baa36979fc2c2d7f68bfb737f8bc897612b8 name: agent_image_107_0_10_8627_1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:d49417ea85ee1ddb84429552a5aac87a82c2843904d8c0300a43b657b7a49e0e name: mongodb_image_5_0_1_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:802efc032909aff521e2d9a667a371f6fbf579247f0bdfa18377eedb003f06a0 + - image: quay.io/mongodb/mongodb-agent@sha256:802efc032909aff521e2d9a667a371f6fbf579247f0bdfa18377eedb003f06a0 name: agent-image-107-0-15-8741-1-1-1-0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:15e31285c8949a4666f7702df53416b0905d76945a28575a6028a876cc8a6f0f name: mongodb-image-4-4-7-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:11dba8ee4f2706d952c42ecf09a76a0157c231a981d248dc22e255beed5d74d1 name: mongodb-image-5-0-11-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:e1804a7ba01f0b1fede6881803faf94c0d0d1c4d99b82dfcd7e2de870fae67d7 + - image: quay.io/mongodb/mongodb-agent@sha256:e1804a7ba01f0b1fede6881803faf94c0d0d1c4d99b82dfcd7e2de870fae67d7 name: agent_image_108_0_6_8796_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:41d4e5c94e3817e65cdcd4733185c80eeef00d231041f6da06b235ab61060ed0 + - image: quay.io/mongodb/mongodb-agent@sha256:41d4e5c94e3817e65cdcd4733185c80eeef00d231041f6da06b235ab61060ed0 name: agent_image_12_0_33_7866_1_1_2_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:8d7e5361c2a1604dec3fc671d629806fe6ddc6df524b69a441b86933a4b30093 name: mongodb_image_4_4_2_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:1d418c65d82ccad205b7889be4ba161d3cbfa8b1fb75b21c8fcf02de38fad9ca name: mongodb_image_4_4_11_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 + - image: quay.io/mongodb/mongodb-agent@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 name: agent-image-108-0-2-8729-1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:9ac472f3256b2a5e8ad1ad0237925449eb26f8c363314130ad876378af98d63c + - image: quay.io/mongodb/mongodb-agent@sha256:9ac472f3256b2a5e8ad1ad0237925449eb26f8c363314130ad876378af98d63c name: agent-image-13-36-0-9555-1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:cb2992beecff3b95d31b34ab7f3fc01b57df6278f0ace53c8ec9392332ce2ff3 name: ops-manager-image-repository-8-0-5 @@ -1287,19 +1287,19 @@ spec: 
name: mongodb-image-4-4-12-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:1d0caf8dbe8796046f92d98541f152cded564f5a0d8e2b3af53d48aa6e3b9793 name: mongodb_image_8_0_0_ubi9 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:596712619d1b115aba9d645d0391f2e76fae307a2c3df1f9ca19f1bbde3b7f4d + - image: quay.io/mongodb/mongodb-agent@sha256:596712619d1b115aba9d645d0391f2e76fae307a2c3df1f9ca19f1bbde3b7f4d name: agent_image_108_0_2_8729_1_1_2_0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:b2c7f20afba8fdec9430963e6c930cf5434d7dfc4244974876cfb0a73ab3611e name: ops-manager-image-repository-7-0-14 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:000af8d5dda39470ec167562a9880dca06dfe9dca9564ceaf74955b2527bc776 name: mongodb-image-8-0-0-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:c540ecf01791899208227a4f3141f2253be50346ffbc6ec847da2d63db90241c + - image: quay.io/mongodb/mongodb-agent@sha256:c540ecf01791899208227a4f3141f2253be50346ffbc6ec847da2d63db90241c name: agent_image_12_0_35_7911_1_1_0_1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:df6081325ea2fe01d1b27d0c70005dec8232aaf51a43d91661b722b8a1761263 name: mongodb_image_5_0_6_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:288f37db36b6126fbe2ae09acd4752eb2428489ad40947503370a8e470db0bc1 + - image: quay.io/mongodb/mongodb-agent@sha256:288f37db36b6126fbe2ae09acd4752eb2428489ad40947503370a8e470db0bc1 name: agent_image_108_0_1_8718_1_1_0_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:288cd702221820fdeb1df5a0c31fb83bd625de32a3736ca5fbac1dee1a136f7d + - image: quay.io/mongodb/mongodb-agent@sha256:288cd702221820fdeb1df5a0c31fb83bd625de32a3736ca5fbac1dee1a136f7d name: agent_image_108_0_1_8718_1_1_1_0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:9f76f053a61b5d25cadbf97c7804e3e94af337ae5c2e6cecdcafc22fb9649542 name: ops-manager-image-repository-8-0-3 @@ -1307,19 +1307,19 @@ spec: name: mongodb-image-4-4-20-ubi8 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:b2c7f20afba8fdec9430963e6c930cf5434d7dfc4244974876cfb0a73ab3611e name: ops_manager_image_repository_7_0_14 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:55ad1aac025933b741dbf2b2bf87baa36979fc2c2d7f68bfb737f8bc897612b8 + - image: quay.io/mongodb/mongodb-agent@sha256:55ad1aac025933b741dbf2b2bf87baa36979fc2c2d7f68bfb737f8bc897612b8 name: agent-image-107-0-10-8627-1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:ae7d722c78954f0089fc6868166ceb19bb910dfeefd7acdc851d21014bd4aab1 name: ops-manager-image-repository-7-0-10 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:0ff66dd59a9d542e650f6c3d6788f82c9fc21fd758ec022bd9f876a989251a86 + - image: quay.io/mongodb/mongodb-agent@sha256:0ff66dd59a9d542e650f6c3d6788f82c9fc21fd758ec022bd9f876a989251a86 name: agent_image_108_0_2_8729_1_1_0_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:fda1518e08568a39fefef073e8d5eba117e029ca14bbf2442fd8664a1452790c + - image: quay.io/mongodb/mongodb-agent@sha256:fda1518e08568a39fefef073e8d5eba117e029ca14bbf2442fd8664a1452790c name: agent_image_108_0_3_8758_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:805b4c1efe9bddaba4c712ead9d7327ae9b811683fd3c3bea7e2b70e42637821 + - image: quay.io/mongodb/mongodb-agent@sha256:805b4c1efe9bddaba4c712ead9d7327ae9b811683fd3c3bea7e2b70e42637821 name: agent_image_108_0_4_8770_1_1_2_0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:d65a63011d54cee08f8fa1e1bb608707070d98e5f86e3a455345c996c7e53743 name: 
ops_manager_image_repository_8_0_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:5ce68f13f72ae3b9786840f8bf1071b839f484f874f84c64368275fdacdac3d3 + - image: quay.io/mongodb/mongodb-agent@sha256:5ce68f13f72ae3b9786840f8bf1071b839f484f874f84c64368275fdacdac3d3 name: agent-image-108-0-1-8718-1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:f2d9a3264a8814f33143b73c56624572fb80afeab184133d3a901713a31c1a87 name: mongodb-image-5-0-4-ubi8 @@ -1327,11 +1327,11 @@ spec: name: ops-manager-image-repository-6-0-27 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:7885ab6e88214f9289d8babf5189136dde0dd4c84a15a5481ccd3ef512b2e66f name: mongodb-image-4-4-3-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:fe71c534c4b8442e103c13655e7209d261496bed414f93681bfdcf84902772c5 + - image: quay.io/mongodb/mongodb-agent@sha256:fe71c534c4b8442e103c13655e7209d261496bed414f93681bfdcf84902772c5 name: agent_image_108_0_0_8694_1_1_2_0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:875049c8947d6181de1a1f45db699d297f986885837119438e111d97ca059074 + - image: quay.io/mongodb/mongodb-agent@sha256:875049c8947d6181de1a1f45db699d297f986885837119438e111d97ca059074 name: agent-image-107-0-15-8741-1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:9d958eb46314f28542ffe33e4d0027ba37681b1ebcc25c823e45fe3b3a145946 + - image: quay.io/mongodb/mongodb-agent@sha256:9d958eb46314f28542ffe33e4d0027ba37681b1ebcc25c823e45fe3b3a145946 name: agent-image-13-36-0-9555-1-1-0-1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:090f39cd6d13131620b42154a615ec5ae98c41f1f2839529ca00b1c77ca76c05 name: ops-manager-image-repository-7-0-15 @@ -1343,15 +1343,15 @@ spec: name: mongodb-image-4-4-9-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:aa300abf739e26350a258f643fa8e57aedacd041055c7b923bd22b21ef554cdb name: mongodb-image-5-0-7-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:71f681014fd1bf8113637e0f403381f4ba1c40b0a4869199d1563bb91334a846 + - image: quay.io/mongodb/mongodb-agent@sha256:71f681014fd1bf8113637e0f403381f4ba1c40b0a4869199d1563bb91334a846 name: agent_image_107_0_11_8645_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:b8dfbb85a47c7647b96783819083e4aedf6aaadcdb7202beb174052356fa6b72 + - image: quay.io/mongodb/mongodb-agent@sha256:b8dfbb85a47c7647b96783819083e4aedf6aaadcdb7202beb174052356fa6b72 name: agent_image_107_0_13_8702_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:cee210ac2ec4b31e03e93aeb3c4c881f96076ad8cc972772d4c2f367502d23d9 + - image: quay.io/mongodb/mongodb-agent@sha256:cee210ac2ec4b31e03e93aeb3c4c881f96076ad8cc972772d4c2f367502d23d9 name: agent_image_107_0_13_8702_1_1_0_1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:07761fb6f03b6517374fc788b02a655e1e8788a8b81e754b45199898f44f08b8 name: mongodb_image_6_0_2_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:406aabaef8a2e9822ea69a41c81ecf4d4f1f73ac6608dbffca59007032b600b3 + - image: quay.io/mongodb/mongodb-agent@sha256:406aabaef8a2e9822ea69a41c81ecf4d4f1f73ac6608dbffca59007032b600b3 name: agent-image-12-0-34-7888-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:b38cb111e5bab1cebde78d10e566297984a29ae37c0086f6976ffc388d9cc33a name: ops-manager-image-repository-7-0-12 @@ -1367,7 +1367,7 @@ spec: name: mongodb-image-4-4-15-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:d49417ea85ee1ddb84429552a5aac87a82c2843904d8c0300a43b657b7a49e0e name: mongodb-image-5-0-1-ubi8 - - image: 
quay.io/mongodb/mongodb-agent-ubi@sha256:9ac472f3256b2a5e8ad1ad0237925449eb26f8c363314130ad876378af98d63c + - image: quay.io/mongodb/mongodb-agent@sha256:9ac472f3256b2a5e8ad1ad0237925449eb26f8c363314130ad876378af98d63c name: agent_image_13_36_0_9555_1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:e424ff1ac407e75edadde07d2c0e7d76759b1ad96d57273a68386d3f6710dadf name: ops_manager_image_repository_6_0_25 @@ -1381,43 +1381,43 @@ spec: name: ops_manager_image_repository_8_0_4 - image: quay.io/mongodb/mongodb-kubernetes-init-database@sha256:3015a971a103750cf42be6112379f2d47c26f1df21aca972e4af3def8e831651 name: init-database-image-repository-1-2-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:3b332aeb8dce3734fd0a345f6dd0c240ae96ca3aa9286e2b300fc55e4f16e54c + - image: quay.io/mongodb/mongodb-agent@sha256:3b332aeb8dce3734fd0a345f6dd0c240ae96ca3aa9286e2b300fc55e4f16e54c name: agent-image-108-0-7-8810-1-1-0-1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:aa8683500e8d3632c4588bbcdddb4e3186a01f48aefe1b6d95f8860dae8b6c76 name: mongodb-image-4-4-21-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:31fa3832e9ca9151e7221c88596daf820bd6c12c2436e0a2e51664bad2a826a4 name: mongodb-image-5-0-13-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:802efc032909aff521e2d9a667a371f6fbf579247f0bdfa18377eedb003f06a0 + - image: quay.io/mongodb/mongodb-agent@sha256:802efc032909aff521e2d9a667a371f6fbf579247f0bdfa18377eedb003f06a0 name: agent_image_107_0_15_8741_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:3bb3155da1c3700a25caffd55f1b63266fee9055732f32451728615758604a0c + - image: quay.io/mongodb/mongodb-agent@sha256:3bb3155da1c3700a25caffd55f1b63266fee9055732f32451728615758604a0c name: agent_image_12_0_35_7911_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:cee210ac2ec4b31e03e93aeb3c4c881f96076ad8cc972772d4c2f367502d23d9 + - image: quay.io/mongodb/mongodb-agent@sha256:cee210ac2ec4b31e03e93aeb3c4c881f96076ad8cc972772d4c2f367502d23d9 name: agent-image-107-0-13-8702-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:d13c3daebf5f0bdf3dfdbf611f02fe89b323be20002a390491a91f3fb6b000e8 + - image: quay.io/mongodb/mongodb-agent@sha256:d13c3daebf5f0bdf3dfdbf611f02fe89b323be20002a390491a91f3fb6b000e8 name: agent-image-107-0-13-8702-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:7b08de82504e9c183f5a83e0a29d73eb5ef3a688c3defe84ffcc562be762f834 + - image: quay.io/mongodb/mongodb-agent@sha256:7b08de82504e9c183f5a83e0a29d73eb5ef3a688c3defe84ffcc562be762f834 name: agent-image-108-0-3-8758-1-1-0-1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:67238129df09d33ee4746ff1acb5515d699a29d9f42350eae3f3f69ccb7fbf54 name: mongodb-image-4-4-16-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:569759bf8dfe2b597fac2d6ac78617ceacc135c7f823c3dce50c73e35b312c1c + - image: quay.io/mongodb/mongodb-agent@sha256:569759bf8dfe2b597fac2d6ac78617ceacc135c7f823c3dce50c73e35b312c1c name: agent_image_107_0_15_8741_1_1_0_1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:85c1d669c073caf9cbaac815e4c8ea2ca18b71deefa03bf84cd6d8b8ebd53e12 name: mongodb_image_4_4_19_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:7c60e5b7214377f78a6db305c613e22dd048793a1e001c474ef4ecafe687a046 name: mongodb_image_5_0_18_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:f81b5a29542149241ea273946e1f78ffb75323653f3ee753902b26420b988252 + - image: 
quay.io/mongodb/mongodb-agent@sha256:f81b5a29542149241ea273946e1f78ffb75323653f3ee753902b26420b988252 name: agent-image-12-0-34-7888-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:2db9d3d6eb6ee6abba4d31be791b4f35d30076b7478c3d7aadf67560f5b5bba8 + - image: quay.io/mongodb/mongodb-agent@sha256:2db9d3d6eb6ee6abba4d31be791b4f35d30076b7478c3d7aadf67560f5b5bba8 name: agent-image-12-0-35-7911-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:0fde236fe26d5b20210d6f246aadacd7845874712def516cbe2eec11d79d5181 name: mongodb-image-4-4-5-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:3ceed938a6eee71b7c21fe9e848e2d5ef8835b8e8c0e1a7ce3bee9b319d302f6 name: mongodb-image-6-0-5-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:21862e6964962d414a7b3780372685fd94bc9ecd042a21ddf63d26a302a298ac + - image: quay.io/mongodb/mongodb-agent@sha256:21862e6964962d414a7b3780372685fd94bc9ecd042a21ddf63d26a302a298ac name: agent_image_108_0_0_8694_1_1_1_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:7885ab6e88214f9289d8babf5189136dde0dd4c84a15a5481ccd3ef512b2e66f name: mongodb_image_4_4_3_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:593e11c50e8eca1e400c9da870f0359c81c28e63a6e1b1677e565bf5af36a5b3 + - image: quay.io/mongodb/mongodb-agent@sha256:593e11c50e8eca1e400c9da870f0359c81c28e63a6e1b1677e565bf5af36a5b3 name: agent-image-12-0-33-7866-1-1-0-1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:cb2992beecff3b95d31b34ab7f3fc01b57df6278f0ace53c8ec9392332ce2ff3 name: ops_manager_image_repository_8_0_5 @@ -1425,7 +1425,7 @@ spec: name: mongodb_image_4_4_1_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:67238129df09d33ee4746ff1acb5515d699a29d9f42350eae3f3f69ccb7fbf54 name: mongodb_image_4_4_16_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:2501709c5f245d212b703d1be92847f2d9c3c5193725767a0237f760de7830e4 + - image: quay.io/mongodb/mongodb-agent@sha256:2501709c5f245d212b703d1be92847f2d9c3c5193725767a0237f760de7830e4 name: agent-image-107-0-12-8669-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:fad91b02e0b484a9903879c5a851c78fce1af5ce84254907b70c4d9b1bd47970 name: mongodb_image_5_0_15_ubi8 @@ -1433,37 +1433,37 @@ spec: name: mongodb-image-6-0-1-ubi8 - image: quay.io/mongodb/mongodb-kubernetes-database@sha256:9b225a678108af95fa0305da5c9b811b80abc893774994f32126154a9864a17a name: mongodb-enterprise-database-image-1-2-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:805b4c1efe9bddaba4c712ead9d7327ae9b811683fd3c3bea7e2b70e42637821 + - image: quay.io/mongodb/mongodb-agent@sha256:805b4c1efe9bddaba4c712ead9d7327ae9b811683fd3c3bea7e2b70e42637821 name: agent-image-108-0-4-8770-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:6e53c6d5cd2995555eafdf4984880b8814d056396985f8b7b38e28f38133ee29 name: mongodb-image-5-0-10-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:b29677ccc177a3b3e71f664bd25f3196a67d35157c1b01ba5de527a84f6e2516 + - image: quay.io/mongodb/mongodb-agent@sha256:b29677ccc177a3b3e71f664bd25f3196a67d35157c1b01ba5de527a84f6e2516 name: agent_image_107_0_12_8669_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:e1b64329377ff9fc47f4a80f92c54cd9654e69a6aec24218f08bd8d0b06fc7cc + - image: quay.io/mongodb/mongodb-agent@sha256:e1b64329377ff9fc47f4a80f92c54cd9654e69a6aec24218f08bd8d0b06fc7cc name: agent_image_108_0_6_8796_1_1_2_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:523566f44cbb6bbf0cd12b1adae939a883f2254c78dc4b8c9d0098581337b1db name: 
mongodb_image_4_4_18_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:1f096462174cc20e960bdc62850721047a9e691aacf40334d5e7696dd98e44de name: mongodb-image-6-0-3-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:d13c3daebf5f0bdf3dfdbf611f02fe89b323be20002a390491a91f3fb6b000e8 + - image: quay.io/mongodb/mongodb-agent@sha256:d13c3daebf5f0bdf3dfdbf611f02fe89b323be20002a390491a91f3fb6b000e8 name: agent_image_107_0_13_8702_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:04f6458c699ab2e5b8878c803784c486c34a8595deac65fa76a5de338e4780a3 + - image: quay.io/mongodb/mongodb-agent@sha256:04f6458c699ab2e5b8878c803784c486c34a8595deac65fa76a5de338e4780a3 name: agent_image_108_0_7_8810_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:78baa2581c7b181aae5b70cf1c78a78cfe4aa7ea31bbfe93ebd238fbaa58b6ee + - image: quay.io/mongodb/mongodb-agent@sha256:78baa2581c7b181aae5b70cf1c78a78cfe4aa7ea31bbfe93ebd238fbaa58b6ee name: agent_image_108_0_7_8810_1_1_2_0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:b8b3a9bec5d7b550a7dc7d152eeb151f68832d67707328debc6da27be53d2100 + - image: quay.io/mongodb/mongodb-agent@sha256:b8b3a9bec5d7b550a7dc7d152eeb151f68832d67707328debc6da27be53d2100 name: agent_image_12_0_34_7888_1_1_1_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:a48a4d5c275fd669a27469649fb39ea38c728dbcf9f0a9da4e7cf27a2ce912ed name: mongodb-image-5-0-2-ubi8 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:090f39cd6d13131620b42154a615ec5ae98c41f1f2839529ca00b1c77ca76c05 name: ops_manager_image_repository_7_0_15 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:288cd702221820fdeb1df5a0c31fb83bd625de32a3736ca5fbac1dee1a136f7d + - image: quay.io/mongodb/mongodb-agent@sha256:288cd702221820fdeb1df5a0c31fb83bd625de32a3736ca5fbac1dee1a136f7d name: agent-image-108-0-1-8718-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:45255201acacf6a2ebaab4ff4b331ffed3b21af14cd23d868d425c464a63a6ad + - image: quay.io/mongodb/mongodb-agent@sha256:45255201acacf6a2ebaab4ff4b331ffed3b21af14cd23d868d425c464a63a6ad name: agent-image-13-36-0-9555-1-1-2-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:723b1816f2d41424f77c92c8b761b5bada027057feec9d9bf1acaf570e25cd4f + - image: quay.io/mongodb/mongodb-agent@sha256:723b1816f2d41424f77c92c8b761b5bada027057feec9d9bf1acaf570e25cd4f name: agent_image_107_0_12_8669_1_1_0_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:de19730c88c98bf8b6ad98ee175f783d0298e4eeecf3c3709fe2e0981c3ee6f7 + - image: quay.io/mongodb/mongodb-agent@sha256:de19730c88c98bf8b6ad98ee175f783d0298e4eeecf3c3709fe2e0981c3ee6f7 name: agent_image_12_0_33_7866_1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:ef732e3eea91cc86caf2eabedc0c9115ed235a8d204f33e74bbf090a0b0259fb name: mongodb_image_4_4_15_ubi8 @@ -1475,33 +1475,33 @@ spec: name: mongodb-image-4-4-2-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:523566f44cbb6bbf0cd12b1adae939a883f2254c78dc4b8c9d0098581337b1db name: mongodb-image-4-4-18-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:983db0e0b65396d0b8e8c397ab681ed0b28c4e8d922b25b9d105bd5eb920addf + - image: quay.io/mongodb/mongodb-agent@sha256:983db0e0b65396d0b8e8c397ab681ed0b28c4e8d922b25b9d105bd5eb920addf name: agent_image_107_0_10_8627_1_1_2_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:3add8b68bfbaa42596fe5dbbee4623ba5ff1a826b18bb71301525adf574dcb94 name: mongodb_image_5_0_3_ubi8 - image: 
quay.io/mongodb/mongodb-enterprise-server@sha256:46b1c30bb2e68c5f2367ba8517d0976c63aa17932a924f149d71982aa0f2fbdd name: mongodb-image-5-0-16-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:b17ea63780448ccbc6fb6e92c4f919e1bc3bdcbb33c1fdd6f969a55f111dcc07 + - image: quay.io/mongodb/mongodb-agent@sha256:b17ea63780448ccbc6fb6e92c4f919e1bc3bdcbb33c1fdd6f969a55f111dcc07 name: agent_image_107_0_10_8627_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:60b0659e4d248df28af4882df29f847934887654edba24d6b30d4dbb9f4ed1d0 + - image: quay.io/mongodb/mongodb-agent@sha256:60b0659e4d248df28af4882df29f847934887654edba24d6b30d4dbb9f4ed1d0 name: agent_image_108_0_3_8758_1_1_2_0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:81a372b444ad45df22965b41fa393bd49d987c136411294bca0afb5f35b02b13 name: ops_manager_image_repository_7_0_13 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:41d4e5c94e3817e65cdcd4733185c80eeef00d231041f6da06b235ab61060ed0 + - image: quay.io/mongodb/mongodb-agent@sha256:41d4e5c94e3817e65cdcd4733185c80eeef00d231041f6da06b235ab61060ed0 name: agent-image-12-0-33-7866-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:037392b561f59674e70b9cde634afd5df09b02f9e61c18ba946d812701c69e2c name: mongodb_image_5_0_14_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:3ceed938a6eee71b7c21fe9e848e2d5ef8835b8e8c0e1a7ce3bee9b319d302f6 name: mongodb_image_6_0_5_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:b29677ccc177a3b3e71f664bd25f3196a67d35157c1b01ba5de527a84f6e2516 + - image: quay.io/mongodb/mongodb-agent@sha256:b29677ccc177a3b3e71f664bd25f3196a67d35157c1b01ba5de527a84f6e2516 name: agent-image-107-0-12-8669-1-1-1-0 - image: quay.io/mongodb/mongodb-kubernetes@sha256:36afe77ce7f385b2cdfe116cdf5e9bbdb5ebdc55ba4d602f4d06c4f74cdb8777 name: mongodb-kubernetes-operator - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:9e04f2087703049ec12a2ee6e058737f93fd146c1512a63b50a7b1bfc2f1e631 + - image: quay.io/mongodb/mongodb-agent@sha256:9e04f2087703049ec12a2ee6e058737f93fd146c1512a63b50a7b1bfc2f1e631 name: agent_image_107_0_11_8645_1_1_0_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:3b7389074fc8a3563adf311b58bee910700518b0b25a1bf05a5c94ad3050e1c6 + - image: quay.io/mongodb/mongodb-agent@sha256:3b7389074fc8a3563adf311b58bee910700518b0b25a1bf05a5c94ad3050e1c6 name: agent_image_108_0_4_8770_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:376389f5a7102142aa2556c5ae73712c343800f1e2b2193e00ebae3ad3a96cc0 + - image: quay.io/mongodb/mongodb-agent@sha256:376389f5a7102142aa2556c5ae73712c343800f1e2b2193e00ebae3ad3a96cc0 name: agent-image-13-36-0-9555-1-1-1-0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:07205cd580a01b3102ee20723fa04cbb572580945127869e02e397ebe9ce3ad6 name: ops-manager-image-repository-8-0-6 @@ -1515,7 +1515,7 @@ spec: name: mongodb_image_4_4_21_ubi8 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:6e00339f856dd4f665d2912031b3ab001f03049338ed0088022cd2cb7c1174a9 name: ops_manager_image_repository_8_0_7 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:432462c265801d531327a19689c1c668e7ee5972d235ffdd3d910125d628f6d1 + - image: quay.io/mongodb/mongodb-agent@sha256:432462c265801d531327a19689c1c668e7ee5972d235ffdd3d910125d628f6d1 name: agent-image-108-0-4-8770-1-1-1-0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:c752a7f0726d21d55f204a359b85fa29cd727a5fe571b37289ae3ea52d6206b0 name: mongodb-image-4-4-1-ubi8 @@ -1525,25 +1525,25 @@ spec: name: mongodb-image-4-4-11-ubi8 - 
image: quay.io/mongodb/mongodb-enterprise-server@sha256:4bf33563b33984aab3f2061d7cc14e70ca737b27685ff8ab9c94015ef38d4794 name: mongodb-image-5-0-12-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:6c08ec6352215cda558cfb37357b4b8faf63adc45ca22f585ae89651233717d6 + - image: quay.io/mongodb/mongodb-agent@sha256:6c08ec6352215cda558cfb37357b4b8faf63adc45ca22f585ae89651233717d6 name: agent_image_107_0_13_8702_1_1_2_0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:3b332aeb8dce3734fd0a345f6dd0c240ae96ca3aa9286e2b300fc55e4f16e54c + - image: quay.io/mongodb/mongodb-agent@sha256:3b332aeb8dce3734fd0a345f6dd0c240ae96ca3aa9286e2b300fc55e4f16e54c name: agent_image_108_0_7_8810_1_1_0_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:97511a99fda4b0f28c98e6eac6b5b87b1a2890b0b92fba6fbb56ab4f4fabca13 + - image: quay.io/mongodb/mongodb-agent@sha256:97511a99fda4b0f28c98e6eac6b5b87b1a2890b0b92fba6fbb56ab4f4fabca13 name: agent-image-107-0-11-8645-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:a65cd79ea5416140373edbf1ae51b8995269099307676d2d3c69f889d3bc93fd + - image: quay.io/mongodb/mongodb-agent@sha256:a65cd79ea5416140373edbf1ae51b8995269099307676d2d3c69f889d3bc93fd name: agent_image_108_0_1_8718_1_1_2_0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:c9639810f9457ad79cbf880a9ddeaf3bad557e2022b206ad54852d0cfc4395fe + - image: quay.io/mongodb/mongodb-agent@sha256:c9639810f9457ad79cbf880a9ddeaf3bad557e2022b206ad54852d0cfc4395fe name: agent-image-108-0-7-8810-1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:b38cb111e5bab1cebde78d10e566297984a29ae37c0086f6976ffc388d9cc33a name: ops_manager_image_repository_7_0_12 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:7569c3e38b39b16596ea1c53798d2d3c7dcc711d06e5a72d4646eabd36ce13f9 + - image: quay.io/mongodb/mongodb-agent@sha256:7569c3e38b39b16596ea1c53798d2d3c7dcc711d06e5a72d4646eabd36ce13f9 name: agent-image-108-0-4-8770-1-1-0-1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:2debb3a685c0b1bfff41a01dabd3e024ac23014b64b442b3fad65e059cee5d2f name: ops_manager_image_repository_6_0_26 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:4bf33563b33984aab3f2061d7cc14e70ca737b27685ff8ab9c94015ef38d4794 name: mongodb_image_5_0_12_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:a65cd79ea5416140373edbf1ae51b8995269099307676d2d3c69f889d3bc93fd + - image: quay.io/mongodb/mongodb-agent@sha256:a65cd79ea5416140373edbf1ae51b8995269099307676d2d3c69f889d3bc93fd name: agent-image-108-0-1-8718-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:b00ac773623e1bf422b47e6d783263d2d08737865abe9abbe106d86d1ea25165 name: mongodb-image-4-4-14-ubi8 @@ -1553,35 +1553,35 @@ spec: name: mongodb_image_5_0_2_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:8c6a2eb93680bcb1ea741ac05ea63058dbc83d0c4bfea3e9530bce72a43be03e name: mongodb-image-6-0-4-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:97511a99fda4b0f28c98e6eac6b5b87b1a2890b0b92fba6fbb56ab4f4fabca13 + - image: quay.io/mongodb/mongodb-agent@sha256:97511a99fda4b0f28c98e6eac6b5b87b1a2890b0b92fba6fbb56ab4f4fabca13 name: agent_image_107_0_11_8645_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:45255201acacf6a2ebaab4ff4b331ffed3b21af14cd23d868d425c464a63a6ad + - image: quay.io/mongodb/mongodb-agent@sha256:45255201acacf6a2ebaab4ff4b331ffed3b21af14cd23d868d425c464a63a6ad name: agent_image_13_36_0_9555_1_1_2_0 - - image: 
quay.io/mongodb/mongodb-agent-ubi@sha256:68829b6f61b377640a0bf417eaa64974e8d702cfb2d59fb70506dcdd153ddde5 + - image: quay.io/mongodb/mongodb-agent@sha256:68829b6f61b377640a0bf417eaa64974e8d702cfb2d59fb70506dcdd153ddde5 name: agent-image-107-0-11-8645-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:862ecb37018830c81c35bf03825ecb17cca93c58f6d591bf6bd713c089636fe7 name: ops-manager-image-repository-7-0-11 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:07761fb6f03b6517374fc788b02a655e1e8788a8b81e754b45199898f44f08b8 name: mongodb-image-6-0-2-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:bcbc5058eef2bd2226efa5e0dcf34d9c97c30dc452e8e9394c8435603a50e6ba + - image: quay.io/mongodb/mongodb-agent@sha256:bcbc5058eef2bd2226efa5e0dcf34d9c97c30dc452e8e9394c8435603a50e6ba name: agent_image_108_0_0_8694_1_1_0_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:7b08de82504e9c183f5a83e0a29d73eb5ef3a688c3defe84ffcc562be762f834 + - image: quay.io/mongodb/mongodb-agent@sha256:7b08de82504e9c183f5a83e0a29d73eb5ef3a688c3defe84ffcc562be762f834 name: agent_image_108_0_3_8758_1_1_0_1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:15e31285c8949a4666f7702df53416b0905d76945a28575a6028a876cc8a6f0f name: mongodb_image_4_4_7_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:9e04f2087703049ec12a2ee6e058737f93fd146c1512a63b50a7b1bfc2f1e631 + - image: quay.io/mongodb/mongodb-agent@sha256:9e04f2087703049ec12a2ee6e058737f93fd146c1512a63b50a7b1bfc2f1e631 name: agent-image-107-0-11-8645-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:0b48d8201e132bb82dd615c471cea0eef9376db3329c309049442efd3c6ce2e7 + - image: quay.io/mongodb/mongodb-agent@sha256:0b48d8201e132bb82dd615c471cea0eef9376db3329c309049442efd3c6ce2e7 name: agent-image-107-0-15-8741-1-1-2-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:e1804a7ba01f0b1fede6881803faf94c0d0d1c4d99b82dfcd7e2de870fae67d7 + - image: quay.io/mongodb/mongodb-agent@sha256:e1804a7ba01f0b1fede6881803faf94c0d0d1c4d99b82dfcd7e2de870fae67d7 name: agent-image-108-0-6-8796-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:2db9d3d6eb6ee6abba4d31be791b4f35d30076b7478c3d7aadf67560f5b5bba8 + - image: quay.io/mongodb/mongodb-agent@sha256:2db9d3d6eb6ee6abba4d31be791b4f35d30076b7478c3d7aadf67560f5b5bba8 name: agent_image_12_0_35_7911_1_1_2_0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:07205cd580a01b3102ee20723fa04cbb572580945127869e02e397ebe9ce3ad6 name: ops_manager_image_repository_8_0_6 - image: quay.io/mongodb/mongodb-kubernetes-init-ops-manager@sha256:c07c502e38e5106558da1f1d210b311164ababd55a9bfd07f4a19e488156b32c name: init-ops-manager-image-repository-1-2-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:288f37db36b6126fbe2ae09acd4752eb2428489ad40947503370a8e470db0bc1 + - image: quay.io/mongodb/mongodb-agent@sha256:288f37db36b6126fbe2ae09acd4752eb2428489ad40947503370a8e470db0bc1 name: agent-image-108-0-1-8718-1-1-0-1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:8445d59d491a6d3890ea9c8b421214bcd0fdcfbc420ed40497a790b79f0bc89e name: mongodb-image-4-4-17-ubi8 @@ -1591,7 +1591,7 @@ spec: name: mongodb_image_5_0_8_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:912adcba03effa0929706ae34f3d66412558298f5df2be971785877905d81723 name: mongodb-image-5-0-8-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:6c08ec6352215cda558cfb37357b4b8faf63adc45ca22f585ae89651233717d6 + - image: 
quay.io/mongodb/mongodb-agent@sha256:6c08ec6352215cda558cfb37357b4b8faf63adc45ca22f585ae89651233717d6 name: agent-image-107-0-13-8702-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:ae7d722c78954f0089fc6868166ceb19bb910dfeefd7acdc851d21014bd4aab1 name: ops_manager_image_repository_7_0_10 @@ -1599,13 +1599,13 @@ spec: name: mongodb-image-5-0-5-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:37af0913742b6de339c7e664b30f35dcef04506529e4a09a9bbbdd57643f7149 name: mongodb_image_4_4_20_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:4d0a1403ec874b49004eb99ba0e077b01ee721932ebf8c172881dc9a8e03e295 + - image: quay.io/mongodb/mongodb-agent@sha256:4d0a1403ec874b49004eb99ba0e077b01ee721932ebf8c172881dc9a8e03e295 name: agent-image-108-0-0-8694-1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:3da86ad85be8588c70ad1ec0bd22ce0e063ad2085d6a7373e7956e1999782ac2 name: mongodb-image-5-0-9-ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:432462c265801d531327a19689c1c668e7ee5972d235ffdd3d910125d628f6d1 + - image: quay.io/mongodb/mongodb-agent@sha256:432462c265801d531327a19689c1c668e7ee5972d235ffdd3d910125d628f6d1 name: agent_image_108_0_4_8770_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:9d958eb46314f28542ffe33e4d0027ba37681b1ebcc25c823e45fe3b3a145946 + - image: quay.io/mongodb/mongodb-agent@sha256:9d958eb46314f28542ffe33e4d0027ba37681b1ebcc25c823e45fe3b3a145946 name: agent_image_13_36_0_9555_1_1_0_1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:9f76f053a61b5d25cadbf97c7804e3e94af337ae5c2e6cecdcafc22fb9649542 name: ops_manager_image_repository_8_0_3 @@ -1619,21 +1619,21 @@ spec: name: ops-manager-image-repository-7-0-13 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:1f096462174cc20e960bdc62850721047a9e691aacf40334d5e7696dd98e44de name: mongodb_image_6_0_3_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:04f6458c699ab2e5b8878c803784c486c34a8595deac65fa76a5de338e4780a3 + - image: quay.io/mongodb/mongodb-agent@sha256:04f6458c699ab2e5b8878c803784c486c34a8595deac65fa76a5de338e4780a3 name: agent-image-108-0-7-8810-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:de19730c88c98bf8b6ad98ee175f783d0298e4eeecf3c3709fe2e0981c3ee6f7 + - image: quay.io/mongodb/mongodb-agent@sha256:de19730c88c98bf8b6ad98ee175f783d0298e4eeecf3c3709fe2e0981c3ee6f7 name: agent-image-12-0-33-7866-1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:0b48d8201e132bb82dd615c471cea0eef9376db3329c309049442efd3c6ce2e7 + - image: quay.io/mongodb/mongodb-agent@sha256:0b48d8201e132bb82dd615c471cea0eef9376db3329c309049442efd3c6ce2e7 name: agent_image_107_0_15_8741_1_1_2_0 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:4d0a1403ec874b49004eb99ba0e077b01ee721932ebf8c172881dc9a8e03e295 + - image: quay.io/mongodb/mongodb-agent@sha256:4d0a1403ec874b49004eb99ba0e077b01ee721932ebf8c172881dc9a8e03e295 name: agent_image_108_0_0_8694_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:5236018c21a44cc0cb58151b80165ab7f51c390ae449123a99f59b217dd8893a + - image: quay.io/mongodb/mongodb-agent@sha256:5236018c21a44cc0cb58151b80165ab7f51c390ae449123a99f59b217dd8893a name: agent_image_12_0_33_7866_1_1_1_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:172e338a43df01ee5408d8d1e3cb038d2c55e77761c1a2ac6ed37959d1525140 name: mongodb_image_4_4_6_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:73d39ecfa709fafafd7c32be17226989282b8637600320242ab1b76593538f6d + - image: 
quay.io/mongodb/mongodb-agent@sha256:73d39ecfa709fafafd7c32be17226989282b8637600320242ab1b76593538f6d name: agent_image_107_0_12_8669_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:0217cc9262b9981cc8903d646d26c7bcafd397abc0584839812a19264b1cf6b1 + - image: quay.io/mongodb/mongodb-agent@sha256:0217cc9262b9981cc8903d646d26c7bcafd397abc0584839812a19264b1cf6b1 name: agent_image_12_0_34_7888_1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:d663e12d43217ec28085c3471b47e0dba7ca730558de118f057e303c5d04922e name: ops_manager_image_repository_8_0_0 @@ -1641,15 +1641,15 @@ spec: name: mongodb_image_4_4_9_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:000af8d5dda39470ec167562a9880dca06dfe9dca9564ceaf74955b2527bc776 name: mongodb_image_8_0_0_ubi8 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:b8dfbb85a47c7647b96783819083e4aedf6aaadcdb7202beb174052356fa6b72 + - image: quay.io/mongodb/mongodb-agent@sha256:b8dfbb85a47c7647b96783819083e4aedf6aaadcdb7202beb174052356fa6b72 name: agent-image-107-0-13-8702-1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:c540ecf01791899208227a4f3141f2253be50346ffbc6ec847da2d63db90241c + - image: quay.io/mongodb/mongodb-agent@sha256:c540ecf01791899208227a4f3141f2253be50346ffbc6ec847da2d63db90241c name: agent-image-12-0-35-7911-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:73d39ecfa709fafafd7c32be17226989282b8637600320242ab1b76593538f6d + - image: quay.io/mongodb/mongodb-agent@sha256:73d39ecfa709fafafd7c32be17226989282b8637600320242ab1b76593538f6d name: agent-image-107-0-12-8669-1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:c9639810f9457ad79cbf880a9ddeaf3bad557e2022b206ad54852d0cfc4395fe + - image: quay.io/mongodb/mongodb-agent@sha256:c9639810f9457ad79cbf880a9ddeaf3bad557e2022b206ad54852d0cfc4395fe name: agent_image_108_0_7_8810_1 - - image: quay.io/mongodb/mongodb-agent-ubi@sha256:f81b5a29542149241ea273946e1f78ffb75323653f3ee753902b26420b988252 + - image: quay.io/mongodb/mongodb-agent@sha256:f81b5a29542149241ea273946e1f78ffb75323653f3ee753902b26420b988252 name: agent_image_12_0_34_7888_1_1_0_1 replaces: mongodb-kubernetes.v1.1.0 version: 1.2.0 diff --git a/scripts/dev/release/backup_csv_images.py b/scripts/dev/release/backup_csv_images.py index 9ad3ec78b..e8a35ee00 100755 --- a/scripts/dev/release/backup_csv_images.py +++ b/scripts/dev/release/backup_csv_images.py @@ -148,12 +148,12 @@ def parse_image_url(image_url: str) -> tuple[str, str, str]: """Parse a digest-pinned image URL into registry, repository, and digest components. 
Args:
-        image_url: The digest-pinned image URL (e.g., 'quay.io/mongodb/mongodb-agent-ubi@sha256:abc123')
+        image_url: The digest-pinned image URL (e.g., 'quay.io/mongodb/mongodb-agent@sha256:abc123')

     Returns:
         A tuple of (registry, repository, digest)
         - registry: The registry part (e.g., 'quay.io')
-        - repository: The repository path (e.g., 'mongodb/mongodb-agent-ubi')
+        - repository: The repository path (e.g., 'mongodb/mongodb-agent')
         - digest: The image digest (e.g., 'sha256:abc123')
     """

@@ -186,7 +186,7 @@ def generate_backup_tag(original_image: str, original_tag: str, mck_version: str
     """
     try:
         # Extract the repository name (last part of the image path)
-        # Example: 'quay.io/mongodb/mongodb-agent-ubi' -> 'mongodb-agent-ubi'
+        # Example: 'quay.io/mongodb/mongodb-agent' -> 'mongodb-agent'
         repo_name = original_image.split("@")[0].split("/")[-1]
         return f"quay.io/mongodb/{repo_name}:{original_tag}_openshift_{mck_version}"
     except Exception as e:
diff --git a/scripts/dev/release/test_backup_script.py b/scripts/dev/release/test_backup_script.py
index 7625972f0..78338951e 100644
--- a/scripts/dev/release/test_backup_script.py
+++ b/scripts/dev/release/test_backup_script.py
@@ -46,7 +46,7 @@ def get_test_csv():
             },
             {
                 "name": "RELATED_IMAGE_AGENT",
-                "value": "quay.io/mongodb/mongodb-agent-ubi@sha256:ghi789jkl012",
+                "value": "quay.io/mongodb/mongodb-agent@sha256:ghi789jkl012",
             },
             {"name": "REGULAR_ENV_VAR", "value": "not-an-image"},
         ],
@@ -61,7 +61,7 @@
         },
         "relatedImages": [
             {"name": "database-image", "image": "quay.io/mongodb/mongodb-enterprise-server@sha256:def456ghi789"},
-            {"name": "agent-image", "image": "quay.io/mongodb/mongodb-agent-ubi@sha256:ghi789jkl012"},
+            {"name": "agent-image", "image": "quay.io/mongodb/mongodb-agent@sha256:ghi789jkl012"},
             {"name": "ops-manager-image", "image": "quay.io/mongodb/ops-manager@sha256:jkl012mno345"},
         ],
     },
@@ -73,7 +73,7 @@ def test_parse_image_url():
     """Test URL parsing for digest-pinned images."""
     test_cases = [
         ("quay.io/mongodb/operator@sha256:abc123", ("quay.io", "mongodb/operator", "sha256:abc123")),
-        ("quay.io/mongodb/mongodb-agent-ubi@sha256:def456", ("quay.io", "mongodb/mongodb-agent-ubi", "sha256:def456")),
+        ("quay.io/mongodb/mongodb-agent@sha256:def456", ("quay.io", "mongodb/mongodb-agent", "sha256:def456")),
         ("docker.io/library/nginx@sha256:123456", ("docker.io", "library/nginx", "sha256:123456")),
     ]
@@ -103,8 +103,8 @@ def test_backup_tag_generation():
     """Test backup tag generation."""
     test_cases = [
         ("quay.io/mongodb/operator@sha256:abc123", "1.31.0", "1.0.0"),
-        ("quay.io/mongodb/mongodb-agent-ubi@sha256:def456", "107.0.12.8669-1", "1.2.0"),
-        ("quay.io/mongodb/mongodb-agent-ubi@sha256:ghi789", "12.0.33.7866_1.1.1.0", "1.2.0"),
+        ("quay.io/mongodb/mongodb-agent@sha256:def456", "107.0.12.8669-1", "1.2.0"),
+        ("quay.io/mongodb/mongodb-agent@sha256:ghi789", "12.0.33.7866_1.1.1.0", "1.2.0"),
         ("quay.io/mongodb/mongodb-enterprise-server@sha256:jkl012", "4.4.15-ubi8", "1.0.0"),
     ]
@@ -143,7 +143,7 @@ def test_csv_parsing():
     # Should find 3 unique images from relatedImages section
     expected_image_urls = {
         "quay.io/mongodb/mongodb-enterprise-server@sha256:def456ghi789",
-        "quay.io/mongodb/mongodb-agent-ubi@sha256:ghi789jkl012",
+        "quay.io/mongodb/mongodb-agent@sha256:ghi789jkl012",
         "quay.io/mongodb/ops-manager@sha256:jkl012mno345",
     }
     assert (
@@ -158,7 +158,7 @@
     # Format: quay.io/mongodb/{repo_name}:{original_tag}_openshift_{version}
     expected_tags = {
"quay.io/mongodb/mongodb-enterprise-server@sha256:def456ghi789": ("mongodb-enterprise-server", "1.0.0"), - "quay.io/mongodb/mongodb-agent-ubi@sha256:ghi789jkl012": ("mongodb-agent-ubi", "1.0.0"), + "quay.io/mongodb/mongodb-agent@sha256:ghi789jkl012": ("mongodb-agent-ubi", "1.0.0"), "quay.io/mongodb/ops-manager@sha256:jkl012mno345": ("ops-manager", "1.0.0"), } diff --git a/scripts/release/tests/build_info_test.py b/scripts/release/tests/build_info_test.py index 4dac69b68..74e06523f 100644 --- a/scripts/release/tests/build_info_test.py +++ b/scripts/release/tests/build_info_test.py @@ -409,7 +409,7 @@ def test_load_build_info_release( sign=True, ), "agent": ImageInfo( - repository="quay.io/mongodb/mongodb-agent-ubi", + repository="quay.io/mongodb/mongodb-agent", platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=version, dockerfile_path="docker/mongodb-agent/Dockerfile.atomic", From 89f4f9d50e52ef9e5b4e53317e8936d86af35a68 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Wed, 20 Aug 2025 17:15:26 +0200 Subject: [PATCH 151/164] migrate repo from mongodb/mongodb-agent-ubi to mongodb/mongodb-agent --- .../release/1.2.0.clusterserviceversion.yaml | 390 +++++++++--------- 1 file changed, 195 insertions(+), 195 deletions(-) diff --git a/scripts/dev/release/1.2.0.clusterserviceversion.yaml b/scripts/dev/release/1.2.0.clusterserviceversion.yaml index a8cbc7ce2..6618e522d 100644 --- a/scripts/dev/release/1.2.0.clusterserviceversion.yaml +++ b/scripts/dev/release/1.2.0.clusterserviceversion.yaml @@ -676,9 +676,9 @@ spec: - name: OPS_MANAGER_IMAGE_PULL_POLICY value: Always - name: AGENT_IMAGE - value: quay.io/mongodb/mongodb-agent@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 - name: MDB_AGENT_IMAGE_REPOSITORY - value: quay.io/mongodb/mongodb-agent + value: quay.io/mongodb/mongodb-agent-ubi - name: MONGODB_IMAGE value: mongodb-enterprise-server - name: MONGODB_REPO_URL @@ -698,7 +698,7 @@ spec: - name: OPERATOR_NAME value: mongodb-kubernetes-operator - name: MDB_COMMUNITY_AGENT_IMAGE - value: quay.io/mongodb/mongodb-agent@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 - name: VERSION_UPGRADE_HOOK_IMAGE value: quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.9 - name: READINESS_PROBE_IMAGE @@ -718,133 +718,133 @@ spec: - name: RELATED_IMAGE_INIT_APPDB_IMAGE_REPOSITORY_1_2_0 value: quay.io/mongodb/mongodb-kubernetes-init-appdb@sha256:2230112283c5ab3a7f8d9b37d9f98dd9e13960f8b3eff467366c0382bcf7e3fd - name: RELATED_IMAGE_AGENT_IMAGE_107_0_10_8627_1 - value: quay.io/mongodb/mongodb-agent@sha256:55ad1aac025933b741dbf2b2bf87baa36979fc2c2d7f68bfb737f8bc897612b8 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:55ad1aac025933b741dbf2b2bf87baa36979fc2c2d7f68bfb737f8bc897612b8 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_10_8627_1_1_0_1 - value: quay.io/mongodb/mongodb-agent@sha256:9a9575cb7fdaa649139180f78ba05b5c0fe00c7f7cab40a840aa87d88f379cae + value: quay.io/mongodb/mongodb-agent-ubi@sha256:9a9575cb7fdaa649139180f78ba05b5c0fe00c7f7cab40a840aa87d88f379cae - name: RELATED_IMAGE_AGENT_IMAGE_107_0_10_8627_1_1_1_0 - value: quay.io/mongodb/mongodb-agent@sha256:b17ea63780448ccbc6fb6e92c4f919e1bc3bdcbb33c1fdd6f969a55f111dcc07 + value: 
quay.io/mongodb/mongodb-agent-ubi@sha256:b17ea63780448ccbc6fb6e92c4f919e1bc3bdcbb33c1fdd6f969a55f111dcc07 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_10_8627_1_1_2_0 - value: quay.io/mongodb/mongodb-agent@sha256:983db0e0b65396d0b8e8c397ab681ed0b28c4e8d922b25b9d105bd5eb920addf + value: quay.io/mongodb/mongodb-agent-ubi@sha256:983db0e0b65396d0b8e8c397ab681ed0b28c4e8d922b25b9d105bd5eb920addf - name: RELATED_IMAGE_AGENT_IMAGE_107_0_11_8645_1 - value: quay.io/mongodb/mongodb-agent@sha256:71f681014fd1bf8113637e0f403381f4ba1c40b0a4869199d1563bb91334a846 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:71f681014fd1bf8113637e0f403381f4ba1c40b0a4869199d1563bb91334a846 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_11_8645_1_1_0_1 - value: quay.io/mongodb/mongodb-agent@sha256:9e04f2087703049ec12a2ee6e058737f93fd146c1512a63b50a7b1bfc2f1e631 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:9e04f2087703049ec12a2ee6e058737f93fd146c1512a63b50a7b1bfc2f1e631 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_11_8645_1_1_1_0 - value: quay.io/mongodb/mongodb-agent@sha256:97511a99fda4b0f28c98e6eac6b5b87b1a2890b0b92fba6fbb56ab4f4fabca13 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:97511a99fda4b0f28c98e6eac6b5b87b1a2890b0b92fba6fbb56ab4f4fabca13 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_11_8645_1_1_2_0 - value: quay.io/mongodb/mongodb-agent@sha256:68829b6f61b377640a0bf417eaa64974e8d702cfb2d59fb70506dcdd153ddde5 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:68829b6f61b377640a0bf417eaa64974e8d702cfb2d59fb70506dcdd153ddde5 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_12_8669_1 - value: quay.io/mongodb/mongodb-agent@sha256:73d39ecfa709fafafd7c32be17226989282b8637600320242ab1b76593538f6d + value: quay.io/mongodb/mongodb-agent-ubi@sha256:73d39ecfa709fafafd7c32be17226989282b8637600320242ab1b76593538f6d - name: RELATED_IMAGE_AGENT_IMAGE_107_0_12_8669_1_1_0_1 - value: quay.io/mongodb/mongodb-agent@sha256:723b1816f2d41424f77c92c8b761b5bada027057feec9d9bf1acaf570e25cd4f + value: quay.io/mongodb/mongodb-agent-ubi@sha256:723b1816f2d41424f77c92c8b761b5bada027057feec9d9bf1acaf570e25cd4f - name: RELATED_IMAGE_AGENT_IMAGE_107_0_12_8669_1_1_1_0 - value: quay.io/mongodb/mongodb-agent@sha256:b29677ccc177a3b3e71f664bd25f3196a67d35157c1b01ba5de527a84f6e2516 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:b29677ccc177a3b3e71f664bd25f3196a67d35157c1b01ba5de527a84f6e2516 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_12_8669_1_1_2_0 - value: quay.io/mongodb/mongodb-agent@sha256:2501709c5f245d212b703d1be92847f2d9c3c5193725767a0237f760de7830e4 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:2501709c5f245d212b703d1be92847f2d9c3c5193725767a0237f760de7830e4 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_13_8702_1 - value: quay.io/mongodb/mongodb-agent@sha256:b8dfbb85a47c7647b96783819083e4aedf6aaadcdb7202beb174052356fa6b72 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:b8dfbb85a47c7647b96783819083e4aedf6aaadcdb7202beb174052356fa6b72 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_13_8702_1_1_0_1 - value: quay.io/mongodb/mongodb-agent@sha256:cee210ac2ec4b31e03e93aeb3c4c881f96076ad8cc972772d4c2f367502d23d9 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:cee210ac2ec4b31e03e93aeb3c4c881f96076ad8cc972772d4c2f367502d23d9 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_13_8702_1_1_1_0 - value: quay.io/mongodb/mongodb-agent@sha256:d13c3daebf5f0bdf3dfdbf611f02fe89b323be20002a390491a91f3fb6b000e8 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:d13c3daebf5f0bdf3dfdbf611f02fe89b323be20002a390491a91f3fb6b000e8 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_13_8702_1_1_2_0 - value: 
quay.io/mongodb/mongodb-agent@sha256:6c08ec6352215cda558cfb37357b4b8faf63adc45ca22f585ae89651233717d6 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:6c08ec6352215cda558cfb37357b4b8faf63adc45ca22f585ae89651233717d6 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_15_8741_1 - value: quay.io/mongodb/mongodb-agent@sha256:875049c8947d6181de1a1f45db699d297f986885837119438e111d97ca059074 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:875049c8947d6181de1a1f45db699d297f986885837119438e111d97ca059074 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_15_8741_1_1_0_1 - value: quay.io/mongodb/mongodb-agent@sha256:569759bf8dfe2b597fac2d6ac78617ceacc135c7f823c3dce50c73e35b312c1c + value: quay.io/mongodb/mongodb-agent-ubi@sha256:569759bf8dfe2b597fac2d6ac78617ceacc135c7f823c3dce50c73e35b312c1c - name: RELATED_IMAGE_AGENT_IMAGE_107_0_15_8741_1_1_1_0 - value: quay.io/mongodb/mongodb-agent@sha256:802efc032909aff521e2d9a667a371f6fbf579247f0bdfa18377eedb003f06a0 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:802efc032909aff521e2d9a667a371f6fbf579247f0bdfa18377eedb003f06a0 - name: RELATED_IMAGE_AGENT_IMAGE_107_0_15_8741_1_1_2_0 - value: quay.io/mongodb/mongodb-agent@sha256:0b48d8201e132bb82dd615c471cea0eef9376db3329c309049442efd3c6ce2e7 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:0b48d8201e132bb82dd615c471cea0eef9376db3329c309049442efd3c6ce2e7 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_0_8694_1 - value: quay.io/mongodb/mongodb-agent@sha256:4d0a1403ec874b49004eb99ba0e077b01ee721932ebf8c172881dc9a8e03e295 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:4d0a1403ec874b49004eb99ba0e077b01ee721932ebf8c172881dc9a8e03e295 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_0_8694_1_1_0_1 - value: quay.io/mongodb/mongodb-agent@sha256:bcbc5058eef2bd2226efa5e0dcf34d9c97c30dc452e8e9394c8435603a50e6ba + value: quay.io/mongodb/mongodb-agent-ubi@sha256:bcbc5058eef2bd2226efa5e0dcf34d9c97c30dc452e8e9394c8435603a50e6ba - name: RELATED_IMAGE_AGENT_IMAGE_108_0_0_8694_1_1_1_0 - value: quay.io/mongodb/mongodb-agent@sha256:21862e6964962d414a7b3780372685fd94bc9ecd042a21ddf63d26a302a298ac + value: quay.io/mongodb/mongodb-agent-ubi@sha256:21862e6964962d414a7b3780372685fd94bc9ecd042a21ddf63d26a302a298ac - name: RELATED_IMAGE_AGENT_IMAGE_108_0_0_8694_1_1_2_0 - value: quay.io/mongodb/mongodb-agent@sha256:fe71c534c4b8442e103c13655e7209d261496bed414f93681bfdcf84902772c5 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:fe71c534c4b8442e103c13655e7209d261496bed414f93681bfdcf84902772c5 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_1_8718_1 - value: quay.io/mongodb/mongodb-agent@sha256:5ce68f13f72ae3b9786840f8bf1071b839f484f874f84c64368275fdacdac3d3 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:5ce68f13f72ae3b9786840f8bf1071b839f484f874f84c64368275fdacdac3d3 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_1_8718_1_1_0_1 - value: quay.io/mongodb/mongodb-agent@sha256:288f37db36b6126fbe2ae09acd4752eb2428489ad40947503370a8e470db0bc1 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:288f37db36b6126fbe2ae09acd4752eb2428489ad40947503370a8e470db0bc1 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_1_8718_1_1_1_0 - value: quay.io/mongodb/mongodb-agent@sha256:288cd702221820fdeb1df5a0c31fb83bd625de32a3736ca5fbac1dee1a136f7d + value: quay.io/mongodb/mongodb-agent-ubi@sha256:288cd702221820fdeb1df5a0c31fb83bd625de32a3736ca5fbac1dee1a136f7d - name: RELATED_IMAGE_AGENT_IMAGE_108_0_1_8718_1_1_2_0 - value: quay.io/mongodb/mongodb-agent@sha256:a65cd79ea5416140373edbf1ae51b8995269099307676d2d3c69f889d3bc93fd + value: 
quay.io/mongodb/mongodb-agent-ubi@sha256:a65cd79ea5416140373edbf1ae51b8995269099307676d2d3c69f889d3bc93fd - name: RELATED_IMAGE_AGENT_IMAGE_108_0_2_8729_1 - value: quay.io/mongodb/mongodb-agent@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_2_8729_1_1_0_1 - value: quay.io/mongodb/mongodb-agent@sha256:0ff66dd59a9d542e650f6c3d6788f82c9fc21fd758ec022bd9f876a989251a86 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:0ff66dd59a9d542e650f6c3d6788f82c9fc21fd758ec022bd9f876a989251a86 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_2_8729_1_1_1_0 - value: quay.io/mongodb/mongodb-agent@sha256:4da6f4885e79f8343087afe54642dc92816386b454ed061c6c495d0be2122cbe + value: quay.io/mongodb/mongodb-agent-ubi@sha256:4da6f4885e79f8343087afe54642dc92816386b454ed061c6c495d0be2122cbe - name: RELATED_IMAGE_AGENT_IMAGE_108_0_2_8729_1_1_2_0 - value: quay.io/mongodb/mongodb-agent@sha256:596712619d1b115aba9d645d0391f2e76fae307a2c3df1f9ca19f1bbde3b7f4d + value: quay.io/mongodb/mongodb-agent-ubi@sha256:596712619d1b115aba9d645d0391f2e76fae307a2c3df1f9ca19f1bbde3b7f4d - name: RELATED_IMAGE_AGENT_IMAGE_108_0_3_8758_1 - value: quay.io/mongodb/mongodb-agent@sha256:fda1518e08568a39fefef073e8d5eba117e029ca14bbf2442fd8664a1452790c + value: quay.io/mongodb/mongodb-agent-ubi@sha256:fda1518e08568a39fefef073e8d5eba117e029ca14bbf2442fd8664a1452790c - name: RELATED_IMAGE_AGENT_IMAGE_108_0_3_8758_1_1_0_1 - value: quay.io/mongodb/mongodb-agent@sha256:7b08de82504e9c183f5a83e0a29d73eb5ef3a688c3defe84ffcc562be762f834 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:7b08de82504e9c183f5a83e0a29d73eb5ef3a688c3defe84ffcc562be762f834 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_3_8758_1_1_1_0 - value: quay.io/mongodb/mongodb-agent@sha256:bff84d4b4eac87c22a11a3d3925fe1f3d17f99fa3d8b24ca452d584757a6a372 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:bff84d4b4eac87c22a11a3d3925fe1f3d17f99fa3d8b24ca452d584757a6a372 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_3_8758_1_1_2_0 - value: quay.io/mongodb/mongodb-agent@sha256:60b0659e4d248df28af4882df29f847934887654edba24d6b30d4dbb9f4ed1d0 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:60b0659e4d248df28af4882df29f847934887654edba24d6b30d4dbb9f4ed1d0 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_4_8770_1 - value: quay.io/mongodb/mongodb-agent@sha256:3b7389074fc8a3563adf311b58bee910700518b0b25a1bf05a5c94ad3050e1c6 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:3b7389074fc8a3563adf311b58bee910700518b0b25a1bf05a5c94ad3050e1c6 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_4_8770_1_1_0_1 - value: quay.io/mongodb/mongodb-agent@sha256:7569c3e38b39b16596ea1c53798d2d3c7dcc711d06e5a72d4646eabd36ce13f9 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:7569c3e38b39b16596ea1c53798d2d3c7dcc711d06e5a72d4646eabd36ce13f9 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_4_8770_1_1_1_0 - value: quay.io/mongodb/mongodb-agent@sha256:432462c265801d531327a19689c1c668e7ee5972d235ffdd3d910125d628f6d1 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:432462c265801d531327a19689c1c668e7ee5972d235ffdd3d910125d628f6d1 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_4_8770_1_1_2_0 - value: quay.io/mongodb/mongodb-agent@sha256:805b4c1efe9bddaba4c712ead9d7327ae9b811683fd3c3bea7e2b70e42637821 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:805b4c1efe9bddaba4c712ead9d7327ae9b811683fd3c3bea7e2b70e42637821 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_6_8796_1 - value: 
quay.io/mongodb/mongodb-agent@sha256:4487d7b1110d5efe3321c67e33ed648e525e5fce6d4810b7f1654291a9c21090 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:4487d7b1110d5efe3321c67e33ed648e525e5fce6d4810b7f1654291a9c21090 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_6_8796_1_1_0_1 - value: quay.io/mongodb/mongodb-agent@sha256:29e591f43586d9e0fb2d7deabfedbb276361e01708d556ed7b72d25d15eda63d + value: quay.io/mongodb/mongodb-agent-ubi@sha256:29e591f43586d9e0fb2d7deabfedbb276361e01708d556ed7b72d25d15eda63d - name: RELATED_IMAGE_AGENT_IMAGE_108_0_6_8796_1_1_1_0 - value: quay.io/mongodb/mongodb-agent@sha256:e1804a7ba01f0b1fede6881803faf94c0d0d1c4d99b82dfcd7e2de870fae67d7 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:e1804a7ba01f0b1fede6881803faf94c0d0d1c4d99b82dfcd7e2de870fae67d7 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_6_8796_1_1_2_0 - value: quay.io/mongodb/mongodb-agent@sha256:e1b64329377ff9fc47f4a80f92c54cd9654e69a6aec24218f08bd8d0b06fc7cc + value: quay.io/mongodb/mongodb-agent-ubi@sha256:e1b64329377ff9fc47f4a80f92c54cd9654e69a6aec24218f08bd8d0b06fc7cc - name: RELATED_IMAGE_AGENT_IMAGE_108_0_7_8810_1 - value: quay.io/mongodb/mongodb-agent@sha256:c9639810f9457ad79cbf880a9ddeaf3bad557e2022b206ad54852d0cfc4395fe + value: quay.io/mongodb/mongodb-agent-ubi@sha256:c9639810f9457ad79cbf880a9ddeaf3bad557e2022b206ad54852d0cfc4395fe - name: RELATED_IMAGE_AGENT_IMAGE_108_0_7_8810_1_1_0_1 - value: quay.io/mongodb/mongodb-agent@sha256:3b332aeb8dce3734fd0a345f6dd0c240ae96ca3aa9286e2b300fc55e4f16e54c + value: quay.io/mongodb/mongodb-agent-ubi@sha256:3b332aeb8dce3734fd0a345f6dd0c240ae96ca3aa9286e2b300fc55e4f16e54c - name: RELATED_IMAGE_AGENT_IMAGE_108_0_7_8810_1_1_1_0 - value: quay.io/mongodb/mongodb-agent@sha256:04f6458c699ab2e5b8878c803784c486c34a8595deac65fa76a5de338e4780a3 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:04f6458c699ab2e5b8878c803784c486c34a8595deac65fa76a5de338e4780a3 - name: RELATED_IMAGE_AGENT_IMAGE_108_0_7_8810_1_1_2_0 - value: quay.io/mongodb/mongodb-agent@sha256:78baa2581c7b181aae5b70cf1c78a78cfe4aa7ea31bbfe93ebd238fbaa58b6ee + value: quay.io/mongodb/mongodb-agent-ubi@sha256:78baa2581c7b181aae5b70cf1c78a78cfe4aa7ea31bbfe93ebd238fbaa58b6ee - name: RELATED_IMAGE_AGENT_IMAGE_12_0_33_7866_1 - value: quay.io/mongodb/mongodb-agent@sha256:de19730c88c98bf8b6ad98ee175f783d0298e4eeecf3c3709fe2e0981c3ee6f7 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:de19730c88c98bf8b6ad98ee175f783d0298e4eeecf3c3709fe2e0981c3ee6f7 - name: RELATED_IMAGE_AGENT_IMAGE_12_0_33_7866_1_1_0_1 - value: quay.io/mongodb/mongodb-agent@sha256:593e11c50e8eca1e400c9da870f0359c81c28e63a6e1b1677e565bf5af36a5b3 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:593e11c50e8eca1e400c9da870f0359c81c28e63a6e1b1677e565bf5af36a5b3 - name: RELATED_IMAGE_AGENT_IMAGE_12_0_33_7866_1_1_1_0 - value: quay.io/mongodb/mongodb-agent@sha256:5236018c21a44cc0cb58151b80165ab7f51c390ae449123a99f59b217dd8893a + value: quay.io/mongodb/mongodb-agent-ubi@sha256:5236018c21a44cc0cb58151b80165ab7f51c390ae449123a99f59b217dd8893a - name: RELATED_IMAGE_AGENT_IMAGE_12_0_33_7866_1_1_2_0 - value: quay.io/mongodb/mongodb-agent@sha256:41d4e5c94e3817e65cdcd4733185c80eeef00d231041f6da06b235ab61060ed0 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:41d4e5c94e3817e65cdcd4733185c80eeef00d231041f6da06b235ab61060ed0 - name: RELATED_IMAGE_AGENT_IMAGE_12_0_34_7888_1 - value: quay.io/mongodb/mongodb-agent@sha256:0217cc9262b9981cc8903d646d26c7bcafd397abc0584839812a19264b1cf6b1 + value: 
quay.io/mongodb/mongodb-agent-ubi@sha256:0217cc9262b9981cc8903d646d26c7bcafd397abc0584839812a19264b1cf6b1 - name: RELATED_IMAGE_AGENT_IMAGE_12_0_34_7888_1_1_0_1 - value: quay.io/mongodb/mongodb-agent@sha256:f81b5a29542149241ea273946e1f78ffb75323653f3ee753902b26420b988252 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:f81b5a29542149241ea273946e1f78ffb75323653f3ee753902b26420b988252 - name: RELATED_IMAGE_AGENT_IMAGE_12_0_34_7888_1_1_1_0 - value: quay.io/mongodb/mongodb-agent@sha256:b8b3a9bec5d7b550a7dc7d152eeb151f68832d67707328debc6da27be53d2100 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:b8b3a9bec5d7b550a7dc7d152eeb151f68832d67707328debc6da27be53d2100 - name: RELATED_IMAGE_AGENT_IMAGE_12_0_34_7888_1_1_2_0 - value: quay.io/mongodb/mongodb-agent@sha256:406aabaef8a2e9822ea69a41c81ecf4d4f1f73ac6608dbffca59007032b600b3 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:406aabaef8a2e9822ea69a41c81ecf4d4f1f73ac6608dbffca59007032b600b3 - name: RELATED_IMAGE_AGENT_IMAGE_12_0_35_7911_1 - value: quay.io/mongodb/mongodb-agent@sha256:3bb3155da1c3700a25caffd55f1b63266fee9055732f32451728615758604a0c + value: quay.io/mongodb/mongodb-agent-ubi@sha256:3bb3155da1c3700a25caffd55f1b63266fee9055732f32451728615758604a0c - name: RELATED_IMAGE_AGENT_IMAGE_12_0_35_7911_1_1_0_1 - value: quay.io/mongodb/mongodb-agent@sha256:c540ecf01791899208227a4f3141f2253be50346ffbc6ec847da2d63db90241c + value: quay.io/mongodb/mongodb-agent-ubi@sha256:c540ecf01791899208227a4f3141f2253be50346ffbc6ec847da2d63db90241c - name: RELATED_IMAGE_AGENT_IMAGE_12_0_35_7911_1_1_1_0 - value: quay.io/mongodb/mongodb-agent@sha256:0b9f79fc01e0e4841e2b33f864f58c0a16e51be505f30f2aa5512af1bd832a14 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:0b9f79fc01e0e4841e2b33f864f58c0a16e51be505f30f2aa5512af1bd832a14 - name: RELATED_IMAGE_AGENT_IMAGE_12_0_35_7911_1_1_2_0 - value: quay.io/mongodb/mongodb-agent@sha256:2db9d3d6eb6ee6abba4d31be791b4f35d30076b7478c3d7aadf67560f5b5bba8 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:2db9d3d6eb6ee6abba4d31be791b4f35d30076b7478c3d7aadf67560f5b5bba8 - name: RELATED_IMAGE_AGENT_IMAGE_13_36_0_9555_1 - value: quay.io/mongodb/mongodb-agent@sha256:9ac472f3256b2a5e8ad1ad0237925449eb26f8c363314130ad876378af98d63c + value: quay.io/mongodb/mongodb-agent-ubi@sha256:9ac472f3256b2a5e8ad1ad0237925449eb26f8c363314130ad876378af98d63c - name: RELATED_IMAGE_AGENT_IMAGE_13_36_0_9555_1_1_0_1 - value: quay.io/mongodb/mongodb-agent@sha256:9d958eb46314f28542ffe33e4d0027ba37681b1ebcc25c823e45fe3b3a145946 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:9d958eb46314f28542ffe33e4d0027ba37681b1ebcc25c823e45fe3b3a145946 - name: RELATED_IMAGE_AGENT_IMAGE_13_36_0_9555_1_1_1_0 - value: quay.io/mongodb/mongodb-agent@sha256:376389f5a7102142aa2556c5ae73712c343800f1e2b2193e00ebae3ad3a96cc0 + value: quay.io/mongodb/mongodb-agent-ubi@sha256:376389f5a7102142aa2556c5ae73712c343800f1e2b2193e00ebae3ad3a96cc0 - name: RELATED_IMAGE_AGENT_IMAGE_13_36_0_9555_1_1_2_0 - value: quay.io/mongodb/mongodb-agent@sha256:45255201acacf6a2ebaab4ff4b331ffed3b21af14cd23d868d425c464a63a6ad + value: quay.io/mongodb/mongodb-agent-ubi@sha256:45255201acacf6a2ebaab4ff4b331ffed3b21af14cd23d868d425c464a63a6ad - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_25 value: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:e424ff1ac407e75edadde07d2c0e7d76759b1ad96d57273a68386d3f6710dadf - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_26 @@ -1107,11 +1107,11 @@ spec: provider: name: MongoDB, Inc relatedImages: - - image: 
quay.io/mongodb/mongodb-agent@sha256:b17ea63780448ccbc6fb6e92c4f919e1bc3bdcbb33c1fdd6f969a55f111dcc07 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:b17ea63780448ccbc6fb6e92c4f919e1bc3bdcbb33c1fdd6f969a55f111dcc07 name: agent-image-107-0-10-8627-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent@sha256:596712619d1b115aba9d645d0391f2e76fae307a2c3df1f9ca19f1bbde3b7f4d + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:596712619d1b115aba9d645d0391f2e76fae307a2c3df1f9ca19f1bbde3b7f4d name: agent-image-108-0-2-8729-1-1-2-0 - - image: quay.io/mongodb/mongodb-agent@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 name: agent_image_108_0_2_8729_1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:b00ac773623e1bf422b47e6d783263d2d08737865abe9abbe106d86d1ea25165 name: mongodb_image_4_4_14_ubi8 @@ -1121,33 +1121,33 @@ spec: name: mongodb_image_5_0_10_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:df6081325ea2fe01d1b27d0c70005dec8232aaf51a43d91661b722b8a1761263 name: mongodb-image-5-0-6-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:0ff66dd59a9d542e650f6c3d6788f82c9fc21fd758ec022bd9f876a989251a86 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:0ff66dd59a9d542e650f6c3d6788f82c9fc21fd758ec022bd9f876a989251a86 name: agent-image-108-0-2-8729-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent@sha256:0b9f79fc01e0e4841e2b33f864f58c0a16e51be505f30f2aa5512af1bd832a14 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:0b9f79fc01e0e4841e2b33f864f58c0a16e51be505f30f2aa5512af1bd832a14 name: agent_image_12_0_35_7911_1_1_1_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:9a3a6496be0d2d446e6d654eb56386c7726e0cd2d7232f6dcec579923edf0014 name: mongodb_image_4_4_4_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:11ab324698f919f84a5dab5a964d93fe63f98a48146bb3b86974cba7606d30ec name: mongodb_image_4_4_8_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:29e591f43586d9e0fb2d7deabfedbb276361e01708d556ed7b72d25d15eda63d + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:29e591f43586d9e0fb2d7deabfedbb276361e01708d556ed7b72d25d15eda63d name: agent-image-108-0-6-8796-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent@sha256:3b7389074fc8a3563adf311b58bee910700518b0b25a1bf05a5c94ad3050e1c6 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:3b7389074fc8a3563adf311b58bee910700518b0b25a1bf05a5c94ad3050e1c6 name: agent-image-108-0-4-8770-1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:037392b561f59674e70b9cde634afd5df09b02f9e61c18ba946d812701c69e2c name: mongodb-image-5-0-14-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:fe3e7635cbdc89b0015e509fe310aed1a5812180d03cc9d1816a87bfdbbc094f name: mongodb-image-5-0-17-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:9a9575cb7fdaa649139180f78ba05b5c0fe00c7f7cab40a840aa87d88f379cae + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:9a9575cb7fdaa649139180f78ba05b5c0fe00c7f7cab40a840aa87d88f379cae name: agent_image_107_0_10_8627_1_1_0_1 - - image: quay.io/mongodb/mongodb-agent@sha256:569759bf8dfe2b597fac2d6ac78617ceacc135c7f823c3dce50c73e35b312c1c + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:569759bf8dfe2b597fac2d6ac78617ceacc135c7f823c3dce50c73e35b312c1c name: agent-image-107-0-15-8741-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent@sha256:78baa2581c7b181aae5b70cf1c78a78cfe4aa7ea31bbfe93ebd238fbaa58b6ee + - image: 
quay.io/mongodb/mongodb-agent-ubi@sha256:78baa2581c7b181aae5b70cf1c78a78cfe4aa7ea31bbfe93ebd238fbaa58b6ee name: agent-image-108-0-7-8810-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:33a30d11969eb21952681108092fe7f916e25c8e1de43e6a4e40e126025a2bec name: mongodb-image-4-4-10-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:376389f5a7102142aa2556c5ae73712c343800f1e2b2193e00ebae3ad3a96cc0 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:376389f5a7102142aa2556c5ae73712c343800f1e2b2193e00ebae3ad3a96cc0 name: agent_image_13_36_0_9555_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent@sha256:71f681014fd1bf8113637e0f403381f4ba1c40b0a4869199d1563bb91334a846 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:71f681014fd1bf8113637e0f403381f4ba1c40b0a4869199d1563bb91334a846 name: agent-image-107-0-11-8645-1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:5545696bf7e536c0030f5f5682be2fcdde5213603095ba05ad937e3e43dcc03d name: ops-manager-image-repository-8-0-4 @@ -1155,15 +1155,15 @@ spec: name: ops_manager_image_repository_8_0_2 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:dfd4277f0c16ed476ac7de51894739e8c8d13a65ea5024b9a97b3dce0f6c6975 name: mongodb_image_4_4_13_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:5236018c21a44cc0cb58151b80165ab7f51c390ae449123a99f59b217dd8893a + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:5236018c21a44cc0cb58151b80165ab7f51c390ae449123a99f59b217dd8893a name: agent-image-12-0-33-7866-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent@sha256:b8b3a9bec5d7b550a7dc7d152eeb151f68832d67707328debc6da27be53d2100 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:b8b3a9bec5d7b550a7dc7d152eeb151f68832d67707328debc6da27be53d2100 name: agent-image-12-0-34-7888-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent@sha256:875049c8947d6181de1a1f45db699d297f986885837119438e111d97ca059074 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:875049c8947d6181de1a1f45db699d297f986885837119438e111d97ca059074 name: agent_image_107_0_15_8741_1 - - image: quay.io/mongodb/mongodb-agent@sha256:4da6f4885e79f8343087afe54642dc92816386b454ed061c6c495d0be2122cbe + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:4da6f4885e79f8343087afe54642dc92816386b454ed061c6c495d0be2122cbe name: agent_image_108_0_2_8729_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent@sha256:bff84d4b4eac87c22a11a3d3925fe1f3d17f99fa3d8b24ca452d584757a6a372 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:bff84d4b4eac87c22a11a3d3925fe1f3d17f99fa3d8b24ca452d584757a6a372 name: agent_image_108_0_3_8758_1_1_1_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:3da86ad85be8588c70ad1ec0bd22ce0e063ad2085d6a7373e7956e1999782ac2 name: mongodb_image_5_0_9_ubi8 @@ -1171,11 +1171,11 @@ spec: name: mdb_search_image_1_47_0 - image: quay.io/mongodb/mongodb-kubernetes-init-appdb@sha256:2230112283c5ab3a7f8d9b37d9f98dd9e13960f8b3eff467366c0382bcf7e3fd name: init-appdb-image-repository-1-2-0 - - image: quay.io/mongodb/mongodb-agent@sha256:9a9575cb7fdaa649139180f78ba05b5c0fe00c7f7cab40a840aa87d88f379cae + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:9a9575cb7fdaa649139180f78ba05b5c0fe00c7f7cab40a840aa87d88f379cae name: agent-image-107-0-10-8627-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent@sha256:fe71c534c4b8442e103c13655e7209d261496bed414f93681bfdcf84902772c5 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:fe71c534c4b8442e103c13655e7209d261496bed414f93681bfdcf84902772c5 name: agent-image-108-0-0-8694-1-1-2-0 - - image: 
quay.io/mongodb/mongodb-agent@sha256:60b0659e4d248df28af4882df29f847934887654edba24d6b30d4dbb9f4ed1d0 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:60b0659e4d248df28af4882df29f847934887654edba24d6b30d4dbb9f4ed1d0 name: agent-image-108-0-3-8758-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:e424ff1ac407e75edadde07d2c0e7d76759b1ad96d57273a68386d3f6710dadf name: ops-manager-image-repository-6-0-25 @@ -1183,11 +1183,11 @@ spec: name: mongodb-image-5-0-3-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:fad91b02e0b484a9903879c5a851c78fce1af5ce84254907b70c4d9b1bd47970 name: mongodb-image-5-0-15-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:723b1816f2d41424f77c92c8b761b5bada027057feec9d9bf1acaf570e25cd4f + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:723b1816f2d41424f77c92c8b761b5bada027057feec9d9bf1acaf570e25cd4f name: agent-image-107-0-12-8669-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent@sha256:3bb3155da1c3700a25caffd55f1b63266fee9055732f32451728615758604a0c + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:3bb3155da1c3700a25caffd55f1b63266fee9055732f32451728615758604a0c name: agent-image-12-0-35-7911-1 - - image: quay.io/mongodb/mongodb-agent@sha256:5ce68f13f72ae3b9786840f8bf1071b839f484f874f84c64368275fdacdac3d3 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:5ce68f13f72ae3b9786840f8bf1071b839f484f874f84c64368275fdacdac3d3 name: agent_image_108_0_1_8718_1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:09f8e1052e572fa2c54611109b237cdd834f69b43412ec2deecab3a7427e0a78 name: ops_manager_image_repository_6_0_27 @@ -1199,41 +1199,41 @@ spec: name: mongodb-image-5-0-0-ubi8 - image: quay.io/mongodb/mongodb-kubernetes-init-ops-manager@sha256:c07c502e38e5106558da1f1d210b311164ababd55a9bfd07f4a19e488156b32c name: init_ops_manager_image_repository_1_2_0 - - image: quay.io/mongodb/mongodb-agent@sha256:2501709c5f245d212b703d1be92847f2d9c3c5193725767a0237f760de7830e4 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:2501709c5f245d212b703d1be92847f2d9c3c5193725767a0237f760de7830e4 name: agent_image_107_0_12_8669_1_1_2_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:19925e1297ab652db59c35f53e7b7c8b2b277f8ecdc4215682961aaf2b3e924e name: mongodb_image_4_4_0_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:8445d59d491a6d3890ea9c8b421214bcd0fdcfbc420ed40497a790b79f0bc89e name: mongodb_image_4_4_17_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:fda1518e08568a39fefef073e8d5eba117e029ca14bbf2442fd8664a1452790c + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:fda1518e08568a39fefef073e8d5eba117e029ca14bbf2442fd8664a1452790c name: agent-image-108-0-3-8758-1 - - image: quay.io/mongodb/mongodb-agent@sha256:4487d7b1110d5efe3321c67e33ed648e525e5fce6d4810b7f1654291a9c21090 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:4487d7b1110d5efe3321c67e33ed648e525e5fce6d4810b7f1654291a9c21090 name: agent-image-108-0-6-8796-1 - - image: quay.io/mongodb/mongodb-agent@sha256:0b9f79fc01e0e4841e2b33f864f58c0a16e51be505f30f2aa5512af1bd832a14 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:0b9f79fc01e0e4841e2b33f864f58c0a16e51be505f30f2aa5512af1bd832a14 name: agent-image-12-0-35-7911-1-1-1-0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:d65a63011d54cee08f8fa1e1bb608707070d98e5f86e3a455345c996c7e53743 name: ops-manager-image-repository-8-0-1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:11ab324698f919f84a5dab5a964d93fe63f98a48146bb3b86974cba7606d30ec name: 
mongodb-image-4-4-8-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:68829b6f61b377640a0bf417eaa64974e8d702cfb2d59fb70506dcdd153ddde5 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:68829b6f61b377640a0bf417eaa64974e8d702cfb2d59fb70506dcdd153ddde5 name: agent_image_107_0_11_8645_1_1_2_0 - - image: quay.io/mongodb/mongodb-agent@sha256:29e591f43586d9e0fb2d7deabfedbb276361e01708d556ed7b72d25d15eda63d + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:29e591f43586d9e0fb2d7deabfedbb276361e01708d556ed7b72d25d15eda63d name: agent_image_108_0_6_8796_1_1_0_1 - - image: quay.io/mongodb/mongodb-agent@sha256:bcbc5058eef2bd2226efa5e0dcf34d9c97c30dc452e8e9394c8435603a50e6ba + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:bcbc5058eef2bd2226efa5e0dcf34d9c97c30dc452e8e9394c8435603a50e6ba name: agent-image-108-0-0-8694-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent@sha256:7569c3e38b39b16596ea1c53798d2d3c7dcc711d06e5a72d4646eabd36ce13f9 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:7569c3e38b39b16596ea1c53798d2d3c7dcc711d06e5a72d4646eabd36ce13f9 name: agent_image_108_0_4_8770_1_1_0_1 - - image: quay.io/mongodb/mongodb-agent@sha256:4487d7b1110d5efe3321c67e33ed648e525e5fce6d4810b7f1654291a9c21090 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:4487d7b1110d5efe3321c67e33ed648e525e5fce6d4810b7f1654291a9c21090 name: agent_image_108_0_6_8796_1 - - image: quay.io/mongodb/mongodb-agent@sha256:593e11c50e8eca1e400c9da870f0359c81c28e63a6e1b1677e565bf5af36a5b3 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:593e11c50e8eca1e400c9da870f0359c81c28e63a6e1b1677e565bf5af36a5b3 name: agent_image_12_0_33_7866_1_1_0_1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:862ecb37018830c81c35bf03825ecb17cca93c58f6d591bf6bd713c089636fe7 name: ops_manager_image_repository_7_0_11 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:0fde236fe26d5b20210d6f246aadacd7845874712def516cbe2eec11d79d5181 name: mongodb_image_4_4_5_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:21862e6964962d414a7b3780372685fd94bc9ecd042a21ddf63d26a302a298ac + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:21862e6964962d414a7b3780372685fd94bc9ecd042a21ddf63d26a302a298ac name: agent-image-108-0-0-8694-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent@sha256:bff84d4b4eac87c22a11a3d3925fe1f3d17f99fa3d8b24ca452d584757a6a372 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:bff84d4b4eac87c22a11a3d3925fe1f3d17f99fa3d8b24ca452d584757a6a372 name: agent-image-108-0-3-8758-1-1-1-0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:f2d9a3264a8814f33143b73c56624572fb80afeab184133d3a901713a31c1a87 name: mongodb_image_5_0_4_ubi8 @@ -1245,41 +1245,41 @@ spec: name: mongodb_enterprise_database_image_1_2_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:2bcdb6e94e4436d84950eb1eea40cd5a7f6d33624b9f5f9f40a13e427d8fca9c name: mongodb_image_6_0_0_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:406aabaef8a2e9822ea69a41c81ecf4d4f1f73ac6608dbffca59007032b600b3 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:406aabaef8a2e9822ea69a41c81ecf4d4f1f73ac6608dbffca59007032b600b3 name: agent_image_12_0_34_7888_1_1_2_0 - - image: quay.io/mongodb/mongodb-agent@sha256:983db0e0b65396d0b8e8c397ab681ed0b28c4e8d922b25b9d105bd5eb920addf + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:983db0e0b65396d0b8e8c397ab681ed0b28c4e8d922b25b9d105bd5eb920addf name: agent-image-107-0-10-8627-1-1-2-0 - - image: quay.io/mongodb/mongodb-agent@sha256:4da6f4885e79f8343087afe54642dc92816386b454ed061c6c495d0be2122cbe + - image: 
quay.io/mongodb/mongodb-agent-ubi@sha256:4da6f4885e79f8343087afe54642dc92816386b454ed061c6c495d0be2122cbe name: agent-image-108-0-2-8729-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent@sha256:e1b64329377ff9fc47f4a80f92c54cd9654e69a6aec24218f08bd8d0b06fc7cc + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:e1b64329377ff9fc47f4a80f92c54cd9654e69a6aec24218f08bd8d0b06fc7cc name: agent-image-108-0-6-8796-1-1-2-0 - - image: quay.io/mongodb/mongodb-agent@sha256:0217cc9262b9981cc8903d646d26c7bcafd397abc0584839812a19264b1cf6b1 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:0217cc9262b9981cc8903d646d26c7bcafd397abc0584839812a19264b1cf6b1 name: agent-image-12-0-34-7888-1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:2bcdb6e94e4436d84950eb1eea40cd5a7f6d33624b9f5f9f40a13e427d8fca9c name: mongodb-image-6-0-0-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:1d0caf8dbe8796046f92d98541f152cded564f5a0d8e2b3af53d48aa6e3b9793 name: mongodb-image-8-0-0-ubi9 - - image: quay.io/mongodb/mongodb-agent@sha256:55ad1aac025933b741dbf2b2bf87baa36979fc2c2d7f68bfb737f8bc897612b8 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:55ad1aac025933b741dbf2b2bf87baa36979fc2c2d7f68bfb737f8bc897612b8 name: agent_image_107_0_10_8627_1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:d49417ea85ee1ddb84429552a5aac87a82c2843904d8c0300a43b657b7a49e0e name: mongodb_image_5_0_1_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:802efc032909aff521e2d9a667a371f6fbf579247f0bdfa18377eedb003f06a0 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:802efc032909aff521e2d9a667a371f6fbf579247f0bdfa18377eedb003f06a0 name: agent-image-107-0-15-8741-1-1-1-0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:15e31285c8949a4666f7702df53416b0905d76945a28575a6028a876cc8a6f0f name: mongodb-image-4-4-7-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:11dba8ee4f2706d952c42ecf09a76a0157c231a981d248dc22e255beed5d74d1 name: mongodb-image-5-0-11-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:e1804a7ba01f0b1fede6881803faf94c0d0d1c4d99b82dfcd7e2de870fae67d7 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:e1804a7ba01f0b1fede6881803faf94c0d0d1c4d99b82dfcd7e2de870fae67d7 name: agent_image_108_0_6_8796_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent@sha256:41d4e5c94e3817e65cdcd4733185c80eeef00d231041f6da06b235ab61060ed0 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:41d4e5c94e3817e65cdcd4733185c80eeef00d231041f6da06b235ab61060ed0 name: agent_image_12_0_33_7866_1_1_2_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:8d7e5361c2a1604dec3fc671d629806fe6ddc6df524b69a441b86933a4b30093 name: mongodb_image_4_4_2_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:1d418c65d82ccad205b7889be4ba161d3cbfa8b1fb75b21c8fcf02de38fad9ca name: mongodb_image_4_4_11_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:4649945c78a0e9d2d456e326bbac6e657236041ed6b7849b3ea7ec704763c9e0 name: agent-image-108-0-2-8729-1 - - image: quay.io/mongodb/mongodb-agent@sha256:9ac472f3256b2a5e8ad1ad0237925449eb26f8c363314130ad876378af98d63c + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:9ac472f3256b2a5e8ad1ad0237925449eb26f8c363314130ad876378af98d63c name: agent-image-13-36-0-9555-1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:cb2992beecff3b95d31b34ab7f3fc01b57df6278f0ace53c8ec9392332ce2ff3 name: ops-manager-image-repository-8-0-5 @@ -1287,19 +1287,19 @@ 
spec: name: mongodb-image-4-4-12-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:1d0caf8dbe8796046f92d98541f152cded564f5a0d8e2b3af53d48aa6e3b9793 name: mongodb_image_8_0_0_ubi9 - - image: quay.io/mongodb/mongodb-agent@sha256:596712619d1b115aba9d645d0391f2e76fae307a2c3df1f9ca19f1bbde3b7f4d + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:596712619d1b115aba9d645d0391f2e76fae307a2c3df1f9ca19f1bbde3b7f4d name: agent_image_108_0_2_8729_1_1_2_0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:b2c7f20afba8fdec9430963e6c930cf5434d7dfc4244974876cfb0a73ab3611e name: ops-manager-image-repository-7-0-14 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:000af8d5dda39470ec167562a9880dca06dfe9dca9564ceaf74955b2527bc776 name: mongodb-image-8-0-0-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:c540ecf01791899208227a4f3141f2253be50346ffbc6ec847da2d63db90241c + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:c540ecf01791899208227a4f3141f2253be50346ffbc6ec847da2d63db90241c name: agent_image_12_0_35_7911_1_1_0_1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:df6081325ea2fe01d1b27d0c70005dec8232aaf51a43d91661b722b8a1761263 name: mongodb_image_5_0_6_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:288f37db36b6126fbe2ae09acd4752eb2428489ad40947503370a8e470db0bc1 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:288f37db36b6126fbe2ae09acd4752eb2428489ad40947503370a8e470db0bc1 name: agent_image_108_0_1_8718_1_1_0_1 - - image: quay.io/mongodb/mongodb-agent@sha256:288cd702221820fdeb1df5a0c31fb83bd625de32a3736ca5fbac1dee1a136f7d + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:288cd702221820fdeb1df5a0c31fb83bd625de32a3736ca5fbac1dee1a136f7d name: agent_image_108_0_1_8718_1_1_1_0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:9f76f053a61b5d25cadbf97c7804e3e94af337ae5c2e6cecdcafc22fb9649542 name: ops-manager-image-repository-8-0-3 @@ -1307,19 +1307,19 @@ spec: name: mongodb-image-4-4-20-ubi8 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:b2c7f20afba8fdec9430963e6c930cf5434d7dfc4244974876cfb0a73ab3611e name: ops_manager_image_repository_7_0_14 - - image: quay.io/mongodb/mongodb-agent@sha256:55ad1aac025933b741dbf2b2bf87baa36979fc2c2d7f68bfb737f8bc897612b8 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:55ad1aac025933b741dbf2b2bf87baa36979fc2c2d7f68bfb737f8bc897612b8 name: agent-image-107-0-10-8627-1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:ae7d722c78954f0089fc6868166ceb19bb910dfeefd7acdc851d21014bd4aab1 name: ops-manager-image-repository-7-0-10 - - image: quay.io/mongodb/mongodb-agent@sha256:0ff66dd59a9d542e650f6c3d6788f82c9fc21fd758ec022bd9f876a989251a86 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:0ff66dd59a9d542e650f6c3d6788f82c9fc21fd758ec022bd9f876a989251a86 name: agent_image_108_0_2_8729_1_1_0_1 - - image: quay.io/mongodb/mongodb-agent@sha256:fda1518e08568a39fefef073e8d5eba117e029ca14bbf2442fd8664a1452790c + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:fda1518e08568a39fefef073e8d5eba117e029ca14bbf2442fd8664a1452790c name: agent_image_108_0_3_8758_1 - - image: quay.io/mongodb/mongodb-agent@sha256:805b4c1efe9bddaba4c712ead9d7327ae9b811683fd3c3bea7e2b70e42637821 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:805b4c1efe9bddaba4c712ead9d7327ae9b811683fd3c3bea7e2b70e42637821 name: agent_image_108_0_4_8770_1_1_2_0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:d65a63011d54cee08f8fa1e1bb608707070d98e5f86e3a455345c996c7e53743 name: 
ops_manager_image_repository_8_0_1 - - image: quay.io/mongodb/mongodb-agent@sha256:5ce68f13f72ae3b9786840f8bf1071b839f484f874f84c64368275fdacdac3d3 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:5ce68f13f72ae3b9786840f8bf1071b839f484f874f84c64368275fdacdac3d3 name: agent-image-108-0-1-8718-1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:f2d9a3264a8814f33143b73c56624572fb80afeab184133d3a901713a31c1a87 name: mongodb-image-5-0-4-ubi8 @@ -1327,11 +1327,11 @@ spec: name: ops-manager-image-repository-6-0-27 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:7885ab6e88214f9289d8babf5189136dde0dd4c84a15a5481ccd3ef512b2e66f name: mongodb-image-4-4-3-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:fe71c534c4b8442e103c13655e7209d261496bed414f93681bfdcf84902772c5 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:fe71c534c4b8442e103c13655e7209d261496bed414f93681bfdcf84902772c5 name: agent_image_108_0_0_8694_1_1_2_0 - - image: quay.io/mongodb/mongodb-agent@sha256:875049c8947d6181de1a1f45db699d297f986885837119438e111d97ca059074 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:875049c8947d6181de1a1f45db699d297f986885837119438e111d97ca059074 name: agent-image-107-0-15-8741-1 - - image: quay.io/mongodb/mongodb-agent@sha256:9d958eb46314f28542ffe33e4d0027ba37681b1ebcc25c823e45fe3b3a145946 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:9d958eb46314f28542ffe33e4d0027ba37681b1ebcc25c823e45fe3b3a145946 name: agent-image-13-36-0-9555-1-1-0-1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:090f39cd6d13131620b42154a615ec5ae98c41f1f2839529ca00b1c77ca76c05 name: ops-manager-image-repository-7-0-15 @@ -1343,15 +1343,15 @@ spec: name: mongodb-image-4-4-9-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:aa300abf739e26350a258f643fa8e57aedacd041055c7b923bd22b21ef554cdb name: mongodb-image-5-0-7-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:71f681014fd1bf8113637e0f403381f4ba1c40b0a4869199d1563bb91334a846 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:71f681014fd1bf8113637e0f403381f4ba1c40b0a4869199d1563bb91334a846 name: agent_image_107_0_11_8645_1 - - image: quay.io/mongodb/mongodb-agent@sha256:b8dfbb85a47c7647b96783819083e4aedf6aaadcdb7202beb174052356fa6b72 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:b8dfbb85a47c7647b96783819083e4aedf6aaadcdb7202beb174052356fa6b72 name: agent_image_107_0_13_8702_1 - - image: quay.io/mongodb/mongodb-agent@sha256:cee210ac2ec4b31e03e93aeb3c4c881f96076ad8cc972772d4c2f367502d23d9 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:cee210ac2ec4b31e03e93aeb3c4c881f96076ad8cc972772d4c2f367502d23d9 name: agent_image_107_0_13_8702_1_1_0_1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:07761fb6f03b6517374fc788b02a655e1e8788a8b81e754b45199898f44f08b8 name: mongodb_image_6_0_2_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:406aabaef8a2e9822ea69a41c81ecf4d4f1f73ac6608dbffca59007032b600b3 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:406aabaef8a2e9822ea69a41c81ecf4d4f1f73ac6608dbffca59007032b600b3 name: agent-image-12-0-34-7888-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:b38cb111e5bab1cebde78d10e566297984a29ae37c0086f6976ffc388d9cc33a name: ops-manager-image-repository-7-0-12 @@ -1367,7 +1367,7 @@ spec: name: mongodb-image-4-4-15-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:d49417ea85ee1ddb84429552a5aac87a82c2843904d8c0300a43b657b7a49e0e name: mongodb-image-5-0-1-ubi8 - - image: 
quay.io/mongodb/mongodb-agent@sha256:9ac472f3256b2a5e8ad1ad0237925449eb26f8c363314130ad876378af98d63c + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:9ac472f3256b2a5e8ad1ad0237925449eb26f8c363314130ad876378af98d63c name: agent_image_13_36_0_9555_1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:e424ff1ac407e75edadde07d2c0e7d76759b1ad96d57273a68386d3f6710dadf name: ops_manager_image_repository_6_0_25 @@ -1381,43 +1381,43 @@ spec: name: ops_manager_image_repository_8_0_4 - image: quay.io/mongodb/mongodb-kubernetes-init-database@sha256:3015a971a103750cf42be6112379f2d47c26f1df21aca972e4af3def8e831651 name: init-database-image-repository-1-2-0 - - image: quay.io/mongodb/mongodb-agent@sha256:3b332aeb8dce3734fd0a345f6dd0c240ae96ca3aa9286e2b300fc55e4f16e54c + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:3b332aeb8dce3734fd0a345f6dd0c240ae96ca3aa9286e2b300fc55e4f16e54c name: agent-image-108-0-7-8810-1-1-0-1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:aa8683500e8d3632c4588bbcdddb4e3186a01f48aefe1b6d95f8860dae8b6c76 name: mongodb-image-4-4-21-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:31fa3832e9ca9151e7221c88596daf820bd6c12c2436e0a2e51664bad2a826a4 name: mongodb-image-5-0-13-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:802efc032909aff521e2d9a667a371f6fbf579247f0bdfa18377eedb003f06a0 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:802efc032909aff521e2d9a667a371f6fbf579247f0bdfa18377eedb003f06a0 name: agent_image_107_0_15_8741_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent@sha256:3bb3155da1c3700a25caffd55f1b63266fee9055732f32451728615758604a0c + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:3bb3155da1c3700a25caffd55f1b63266fee9055732f32451728615758604a0c name: agent_image_12_0_35_7911_1 - - image: quay.io/mongodb/mongodb-agent@sha256:cee210ac2ec4b31e03e93aeb3c4c881f96076ad8cc972772d4c2f367502d23d9 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:cee210ac2ec4b31e03e93aeb3c4c881f96076ad8cc972772d4c2f367502d23d9 name: agent-image-107-0-13-8702-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent@sha256:d13c3daebf5f0bdf3dfdbf611f02fe89b323be20002a390491a91f3fb6b000e8 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:d13c3daebf5f0bdf3dfdbf611f02fe89b323be20002a390491a91f3fb6b000e8 name: agent-image-107-0-13-8702-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent@sha256:7b08de82504e9c183f5a83e0a29d73eb5ef3a688c3defe84ffcc562be762f834 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:7b08de82504e9c183f5a83e0a29d73eb5ef3a688c3defe84ffcc562be762f834 name: agent-image-108-0-3-8758-1-1-0-1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:67238129df09d33ee4746ff1acb5515d699a29d9f42350eae3f3f69ccb7fbf54 name: mongodb-image-4-4-16-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:569759bf8dfe2b597fac2d6ac78617ceacc135c7f823c3dce50c73e35b312c1c + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:569759bf8dfe2b597fac2d6ac78617ceacc135c7f823c3dce50c73e35b312c1c name: agent_image_107_0_15_8741_1_1_0_1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:85c1d669c073caf9cbaac815e4c8ea2ca18b71deefa03bf84cd6d8b8ebd53e12 name: mongodb_image_4_4_19_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:7c60e5b7214377f78a6db305c613e22dd048793a1e001c474ef4ecafe687a046 name: mongodb_image_5_0_18_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:f81b5a29542149241ea273946e1f78ffb75323653f3ee753902b26420b988252 + - image: 
quay.io/mongodb/mongodb-agent-ubi@sha256:f81b5a29542149241ea273946e1f78ffb75323653f3ee753902b26420b988252 name: agent-image-12-0-34-7888-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent@sha256:2db9d3d6eb6ee6abba4d31be791b4f35d30076b7478c3d7aadf67560f5b5bba8 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:2db9d3d6eb6ee6abba4d31be791b4f35d30076b7478c3d7aadf67560f5b5bba8 name: agent-image-12-0-35-7911-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:0fde236fe26d5b20210d6f246aadacd7845874712def516cbe2eec11d79d5181 name: mongodb-image-4-4-5-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:3ceed938a6eee71b7c21fe9e848e2d5ef8835b8e8c0e1a7ce3bee9b319d302f6 name: mongodb-image-6-0-5-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:21862e6964962d414a7b3780372685fd94bc9ecd042a21ddf63d26a302a298ac + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:21862e6964962d414a7b3780372685fd94bc9ecd042a21ddf63d26a302a298ac name: agent_image_108_0_0_8694_1_1_1_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:7885ab6e88214f9289d8babf5189136dde0dd4c84a15a5481ccd3ef512b2e66f name: mongodb_image_4_4_3_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:593e11c50e8eca1e400c9da870f0359c81c28e63a6e1b1677e565bf5af36a5b3 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:593e11c50e8eca1e400c9da870f0359c81c28e63a6e1b1677e565bf5af36a5b3 name: agent-image-12-0-33-7866-1-1-0-1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:cb2992beecff3b95d31b34ab7f3fc01b57df6278f0ace53c8ec9392332ce2ff3 name: ops_manager_image_repository_8_0_5 @@ -1425,7 +1425,7 @@ spec: name: mongodb_image_4_4_1_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:67238129df09d33ee4746ff1acb5515d699a29d9f42350eae3f3f69ccb7fbf54 name: mongodb_image_4_4_16_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:2501709c5f245d212b703d1be92847f2d9c3c5193725767a0237f760de7830e4 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:2501709c5f245d212b703d1be92847f2d9c3c5193725767a0237f760de7830e4 name: agent-image-107-0-12-8669-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:fad91b02e0b484a9903879c5a851c78fce1af5ce84254907b70c4d9b1bd47970 name: mongodb_image_5_0_15_ubi8 @@ -1433,37 +1433,37 @@ spec: name: mongodb-image-6-0-1-ubi8 - image: quay.io/mongodb/mongodb-kubernetes-database@sha256:9b225a678108af95fa0305da5c9b811b80abc893774994f32126154a9864a17a name: mongodb-enterprise-database-image-1-2-0 - - image: quay.io/mongodb/mongodb-agent@sha256:805b4c1efe9bddaba4c712ead9d7327ae9b811683fd3c3bea7e2b70e42637821 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:805b4c1efe9bddaba4c712ead9d7327ae9b811683fd3c3bea7e2b70e42637821 name: agent-image-108-0-4-8770-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:6e53c6d5cd2995555eafdf4984880b8814d056396985f8b7b38e28f38133ee29 name: mongodb-image-5-0-10-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:b29677ccc177a3b3e71f664bd25f3196a67d35157c1b01ba5de527a84f6e2516 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:b29677ccc177a3b3e71f664bd25f3196a67d35157c1b01ba5de527a84f6e2516 name: agent_image_107_0_12_8669_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent@sha256:e1b64329377ff9fc47f4a80f92c54cd9654e69a6aec24218f08bd8d0b06fc7cc + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:e1b64329377ff9fc47f4a80f92c54cd9654e69a6aec24218f08bd8d0b06fc7cc name: agent_image_108_0_6_8796_1_1_2_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:523566f44cbb6bbf0cd12b1adae939a883f2254c78dc4b8c9d0098581337b1db name: 
mongodb_image_4_4_18_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:1f096462174cc20e960bdc62850721047a9e691aacf40334d5e7696dd98e44de name: mongodb-image-6-0-3-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:d13c3daebf5f0bdf3dfdbf611f02fe89b323be20002a390491a91f3fb6b000e8 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:d13c3daebf5f0bdf3dfdbf611f02fe89b323be20002a390491a91f3fb6b000e8 name: agent_image_107_0_13_8702_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent@sha256:04f6458c699ab2e5b8878c803784c486c34a8595deac65fa76a5de338e4780a3 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:04f6458c699ab2e5b8878c803784c486c34a8595deac65fa76a5de338e4780a3 name: agent_image_108_0_7_8810_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent@sha256:78baa2581c7b181aae5b70cf1c78a78cfe4aa7ea31bbfe93ebd238fbaa58b6ee + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:78baa2581c7b181aae5b70cf1c78a78cfe4aa7ea31bbfe93ebd238fbaa58b6ee name: agent_image_108_0_7_8810_1_1_2_0 - - image: quay.io/mongodb/mongodb-agent@sha256:b8b3a9bec5d7b550a7dc7d152eeb151f68832d67707328debc6da27be53d2100 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:b8b3a9bec5d7b550a7dc7d152eeb151f68832d67707328debc6da27be53d2100 name: agent_image_12_0_34_7888_1_1_1_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:a48a4d5c275fd669a27469649fb39ea38c728dbcf9f0a9da4e7cf27a2ce912ed name: mongodb-image-5-0-2-ubi8 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:090f39cd6d13131620b42154a615ec5ae98c41f1f2839529ca00b1c77ca76c05 name: ops_manager_image_repository_7_0_15 - - image: quay.io/mongodb/mongodb-agent@sha256:288cd702221820fdeb1df5a0c31fb83bd625de32a3736ca5fbac1dee1a136f7d + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:288cd702221820fdeb1df5a0c31fb83bd625de32a3736ca5fbac1dee1a136f7d name: agent-image-108-0-1-8718-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent@sha256:45255201acacf6a2ebaab4ff4b331ffed3b21af14cd23d868d425c464a63a6ad + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:45255201acacf6a2ebaab4ff4b331ffed3b21af14cd23d868d425c464a63a6ad name: agent-image-13-36-0-9555-1-1-2-0 - - image: quay.io/mongodb/mongodb-agent@sha256:723b1816f2d41424f77c92c8b761b5bada027057feec9d9bf1acaf570e25cd4f + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:723b1816f2d41424f77c92c8b761b5bada027057feec9d9bf1acaf570e25cd4f name: agent_image_107_0_12_8669_1_1_0_1 - - image: quay.io/mongodb/mongodb-agent@sha256:de19730c88c98bf8b6ad98ee175f783d0298e4eeecf3c3709fe2e0981c3ee6f7 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:de19730c88c98bf8b6ad98ee175f783d0298e4eeecf3c3709fe2e0981c3ee6f7 name: agent_image_12_0_33_7866_1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:ef732e3eea91cc86caf2eabedc0c9115ed235a8d204f33e74bbf090a0b0259fb name: mongodb_image_4_4_15_ubi8 @@ -1475,33 +1475,33 @@ spec: name: mongodb-image-4-4-2-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:523566f44cbb6bbf0cd12b1adae939a883f2254c78dc4b8c9d0098581337b1db name: mongodb-image-4-4-18-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:983db0e0b65396d0b8e8c397ab681ed0b28c4e8d922b25b9d105bd5eb920addf + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:983db0e0b65396d0b8e8c397ab681ed0b28c4e8d922b25b9d105bd5eb920addf name: agent_image_107_0_10_8627_1_1_2_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:3add8b68bfbaa42596fe5dbbee4623ba5ff1a826b18bb71301525adf574dcb94 name: mongodb_image_5_0_3_ubi8 - image: 
quay.io/mongodb/mongodb-enterprise-server@sha256:46b1c30bb2e68c5f2367ba8517d0976c63aa17932a924f149d71982aa0f2fbdd name: mongodb-image-5-0-16-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:b17ea63780448ccbc6fb6e92c4f919e1bc3bdcbb33c1fdd6f969a55f111dcc07 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:b17ea63780448ccbc6fb6e92c4f919e1bc3bdcbb33c1fdd6f969a55f111dcc07 name: agent_image_107_0_10_8627_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent@sha256:60b0659e4d248df28af4882df29f847934887654edba24d6b30d4dbb9f4ed1d0 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:60b0659e4d248df28af4882df29f847934887654edba24d6b30d4dbb9f4ed1d0 name: agent_image_108_0_3_8758_1_1_2_0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:81a372b444ad45df22965b41fa393bd49d987c136411294bca0afb5f35b02b13 name: ops_manager_image_repository_7_0_13 - - image: quay.io/mongodb/mongodb-agent@sha256:41d4e5c94e3817e65cdcd4733185c80eeef00d231041f6da06b235ab61060ed0 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:41d4e5c94e3817e65cdcd4733185c80eeef00d231041f6da06b235ab61060ed0 name: agent-image-12-0-33-7866-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:037392b561f59674e70b9cde634afd5df09b02f9e61c18ba946d812701c69e2c name: mongodb_image_5_0_14_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:3ceed938a6eee71b7c21fe9e848e2d5ef8835b8e8c0e1a7ce3bee9b319d302f6 name: mongodb_image_6_0_5_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:b29677ccc177a3b3e71f664bd25f3196a67d35157c1b01ba5de527a84f6e2516 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:b29677ccc177a3b3e71f664bd25f3196a67d35157c1b01ba5de527a84f6e2516 name: agent-image-107-0-12-8669-1-1-1-0 - image: quay.io/mongodb/mongodb-kubernetes@sha256:36afe77ce7f385b2cdfe116cdf5e9bbdb5ebdc55ba4d602f4d06c4f74cdb8777 name: mongodb-kubernetes-operator - - image: quay.io/mongodb/mongodb-agent@sha256:9e04f2087703049ec12a2ee6e058737f93fd146c1512a63b50a7b1bfc2f1e631 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:9e04f2087703049ec12a2ee6e058737f93fd146c1512a63b50a7b1bfc2f1e631 name: agent_image_107_0_11_8645_1_1_0_1 - - image: quay.io/mongodb/mongodb-agent@sha256:3b7389074fc8a3563adf311b58bee910700518b0b25a1bf05a5c94ad3050e1c6 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:3b7389074fc8a3563adf311b58bee910700518b0b25a1bf05a5c94ad3050e1c6 name: agent_image_108_0_4_8770_1 - - image: quay.io/mongodb/mongodb-agent@sha256:376389f5a7102142aa2556c5ae73712c343800f1e2b2193e00ebae3ad3a96cc0 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:376389f5a7102142aa2556c5ae73712c343800f1e2b2193e00ebae3ad3a96cc0 name: agent-image-13-36-0-9555-1-1-1-0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:07205cd580a01b3102ee20723fa04cbb572580945127869e02e397ebe9ce3ad6 name: ops-manager-image-repository-8-0-6 @@ -1515,7 +1515,7 @@ spec: name: mongodb_image_4_4_21_ubi8 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:6e00339f856dd4f665d2912031b3ab001f03049338ed0088022cd2cb7c1174a9 name: ops_manager_image_repository_8_0_7 - - image: quay.io/mongodb/mongodb-agent@sha256:432462c265801d531327a19689c1c668e7ee5972d235ffdd3d910125d628f6d1 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:432462c265801d531327a19689c1c668e7ee5972d235ffdd3d910125d628f6d1 name: agent-image-108-0-4-8770-1-1-1-0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:c752a7f0726d21d55f204a359b85fa29cd727a5fe571b37289ae3ea52d6206b0 name: mongodb-image-4-4-1-ubi8 @@ -1525,25 +1525,25 @@ spec: name: mongodb-image-4-4-11-ubi8 - 
image: quay.io/mongodb/mongodb-enterprise-server@sha256:4bf33563b33984aab3f2061d7cc14e70ca737b27685ff8ab9c94015ef38d4794 name: mongodb-image-5-0-12-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:6c08ec6352215cda558cfb37357b4b8faf63adc45ca22f585ae89651233717d6 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:6c08ec6352215cda558cfb37357b4b8faf63adc45ca22f585ae89651233717d6 name: agent_image_107_0_13_8702_1_1_2_0 - - image: quay.io/mongodb/mongodb-agent@sha256:3b332aeb8dce3734fd0a345f6dd0c240ae96ca3aa9286e2b300fc55e4f16e54c + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:3b332aeb8dce3734fd0a345f6dd0c240ae96ca3aa9286e2b300fc55e4f16e54c name: agent_image_108_0_7_8810_1_1_0_1 - - image: quay.io/mongodb/mongodb-agent@sha256:97511a99fda4b0f28c98e6eac6b5b87b1a2890b0b92fba6fbb56ab4f4fabca13 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:97511a99fda4b0f28c98e6eac6b5b87b1a2890b0b92fba6fbb56ab4f4fabca13 name: agent-image-107-0-11-8645-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent@sha256:a65cd79ea5416140373edbf1ae51b8995269099307676d2d3c69f889d3bc93fd + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:a65cd79ea5416140373edbf1ae51b8995269099307676d2d3c69f889d3bc93fd name: agent_image_108_0_1_8718_1_1_2_0 - - image: quay.io/mongodb/mongodb-agent@sha256:c9639810f9457ad79cbf880a9ddeaf3bad557e2022b206ad54852d0cfc4395fe + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:c9639810f9457ad79cbf880a9ddeaf3bad557e2022b206ad54852d0cfc4395fe name: agent-image-108-0-7-8810-1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:b38cb111e5bab1cebde78d10e566297984a29ae37c0086f6976ffc388d9cc33a name: ops_manager_image_repository_7_0_12 - - image: quay.io/mongodb/mongodb-agent@sha256:7569c3e38b39b16596ea1c53798d2d3c7dcc711d06e5a72d4646eabd36ce13f9 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:7569c3e38b39b16596ea1c53798d2d3c7dcc711d06e5a72d4646eabd36ce13f9 name: agent-image-108-0-4-8770-1-1-0-1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:2debb3a685c0b1bfff41a01dabd3e024ac23014b64b442b3fad65e059cee5d2f name: ops_manager_image_repository_6_0_26 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:4bf33563b33984aab3f2061d7cc14e70ca737b27685ff8ab9c94015ef38d4794 name: mongodb_image_5_0_12_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:a65cd79ea5416140373edbf1ae51b8995269099307676d2d3c69f889d3bc93fd + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:a65cd79ea5416140373edbf1ae51b8995269099307676d2d3c69f889d3bc93fd name: agent-image-108-0-1-8718-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:b00ac773623e1bf422b47e6d783263d2d08737865abe9abbe106d86d1ea25165 name: mongodb-image-4-4-14-ubi8 @@ -1553,35 +1553,35 @@ spec: name: mongodb_image_5_0_2_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:8c6a2eb93680bcb1ea741ac05ea63058dbc83d0c4bfea3e9530bce72a43be03e name: mongodb-image-6-0-4-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:97511a99fda4b0f28c98e6eac6b5b87b1a2890b0b92fba6fbb56ab4f4fabca13 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:97511a99fda4b0f28c98e6eac6b5b87b1a2890b0b92fba6fbb56ab4f4fabca13 name: agent_image_107_0_11_8645_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent@sha256:45255201acacf6a2ebaab4ff4b331ffed3b21af14cd23d868d425c464a63a6ad + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:45255201acacf6a2ebaab4ff4b331ffed3b21af14cd23d868d425c464a63a6ad name: agent_image_13_36_0_9555_1_1_2_0 - - image: 
quay.io/mongodb/mongodb-agent@sha256:68829b6f61b377640a0bf417eaa64974e8d702cfb2d59fb70506dcdd153ddde5 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:68829b6f61b377640a0bf417eaa64974e8d702cfb2d59fb70506dcdd153ddde5 name: agent-image-107-0-11-8645-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:862ecb37018830c81c35bf03825ecb17cca93c58f6d591bf6bd713c089636fe7 name: ops-manager-image-repository-7-0-11 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:07761fb6f03b6517374fc788b02a655e1e8788a8b81e754b45199898f44f08b8 name: mongodb-image-6-0-2-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:bcbc5058eef2bd2226efa5e0dcf34d9c97c30dc452e8e9394c8435603a50e6ba + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:bcbc5058eef2bd2226efa5e0dcf34d9c97c30dc452e8e9394c8435603a50e6ba name: agent_image_108_0_0_8694_1_1_0_1 - - image: quay.io/mongodb/mongodb-agent@sha256:7b08de82504e9c183f5a83e0a29d73eb5ef3a688c3defe84ffcc562be762f834 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:7b08de82504e9c183f5a83e0a29d73eb5ef3a688c3defe84ffcc562be762f834 name: agent_image_108_0_3_8758_1_1_0_1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:15e31285c8949a4666f7702df53416b0905d76945a28575a6028a876cc8a6f0f name: mongodb_image_4_4_7_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:9e04f2087703049ec12a2ee6e058737f93fd146c1512a63b50a7b1bfc2f1e631 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:9e04f2087703049ec12a2ee6e058737f93fd146c1512a63b50a7b1bfc2f1e631 name: agent-image-107-0-11-8645-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent@sha256:0b48d8201e132bb82dd615c471cea0eef9376db3329c309049442efd3c6ce2e7 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:0b48d8201e132bb82dd615c471cea0eef9376db3329c309049442efd3c6ce2e7 name: agent-image-107-0-15-8741-1-1-2-0 - - image: quay.io/mongodb/mongodb-agent@sha256:e1804a7ba01f0b1fede6881803faf94c0d0d1c4d99b82dfcd7e2de870fae67d7 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:e1804a7ba01f0b1fede6881803faf94c0d0d1c4d99b82dfcd7e2de870fae67d7 name: agent-image-108-0-6-8796-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent@sha256:2db9d3d6eb6ee6abba4d31be791b4f35d30076b7478c3d7aadf67560f5b5bba8 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:2db9d3d6eb6ee6abba4d31be791b4f35d30076b7478c3d7aadf67560f5b5bba8 name: agent_image_12_0_35_7911_1_1_2_0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:07205cd580a01b3102ee20723fa04cbb572580945127869e02e397ebe9ce3ad6 name: ops_manager_image_repository_8_0_6 - image: quay.io/mongodb/mongodb-kubernetes-init-ops-manager@sha256:c07c502e38e5106558da1f1d210b311164ababd55a9bfd07f4a19e488156b32c name: init-ops-manager-image-repository-1-2-0 - - image: quay.io/mongodb/mongodb-agent@sha256:288f37db36b6126fbe2ae09acd4752eb2428489ad40947503370a8e470db0bc1 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:288f37db36b6126fbe2ae09acd4752eb2428489ad40947503370a8e470db0bc1 name: agent-image-108-0-1-8718-1-1-0-1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:8445d59d491a6d3890ea9c8b421214bcd0fdcfbc420ed40497a790b79f0bc89e name: mongodb-image-4-4-17-ubi8 @@ -1591,7 +1591,7 @@ spec: name: mongodb_image_5_0_8_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:912adcba03effa0929706ae34f3d66412558298f5df2be971785877905d81723 name: mongodb-image-5-0-8-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:6c08ec6352215cda558cfb37357b4b8faf63adc45ca22f585ae89651233717d6 + - image: 
quay.io/mongodb/mongodb-agent-ubi@sha256:6c08ec6352215cda558cfb37357b4b8faf63adc45ca22f585ae89651233717d6 name: agent-image-107-0-13-8702-1-1-2-0 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:ae7d722c78954f0089fc6868166ceb19bb910dfeefd7acdc851d21014bd4aab1 name: ops_manager_image_repository_7_0_10 @@ -1599,13 +1599,13 @@ spec: name: mongodb-image-5-0-5-ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:37af0913742b6de339c7e664b30f35dcef04506529e4a09a9bbbdd57643f7149 name: mongodb_image_4_4_20_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:4d0a1403ec874b49004eb99ba0e077b01ee721932ebf8c172881dc9a8e03e295 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:4d0a1403ec874b49004eb99ba0e077b01ee721932ebf8c172881dc9a8e03e295 name: agent-image-108-0-0-8694-1 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:3da86ad85be8588c70ad1ec0bd22ce0e063ad2085d6a7373e7956e1999782ac2 name: mongodb-image-5-0-9-ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:432462c265801d531327a19689c1c668e7ee5972d235ffdd3d910125d628f6d1 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:432462c265801d531327a19689c1c668e7ee5972d235ffdd3d910125d628f6d1 name: agent_image_108_0_4_8770_1_1_1_0 - - image: quay.io/mongodb/mongodb-agent@sha256:9d958eb46314f28542ffe33e4d0027ba37681b1ebcc25c823e45fe3b3a145946 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:9d958eb46314f28542ffe33e4d0027ba37681b1ebcc25c823e45fe3b3a145946 name: agent_image_13_36_0_9555_1_1_0_1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:9f76f053a61b5d25cadbf97c7804e3e94af337ae5c2e6cecdcafc22fb9649542 name: ops_manager_image_repository_8_0_3 @@ -1619,21 +1619,21 @@ spec: name: ops-manager-image-repository-7-0-13 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:1f096462174cc20e960bdc62850721047a9e691aacf40334d5e7696dd98e44de name: mongodb_image_6_0_3_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:04f6458c699ab2e5b8878c803784c486c34a8595deac65fa76a5de338e4780a3 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:04f6458c699ab2e5b8878c803784c486c34a8595deac65fa76a5de338e4780a3 name: agent-image-108-0-7-8810-1-1-1-0 - - image: quay.io/mongodb/mongodb-agent@sha256:de19730c88c98bf8b6ad98ee175f783d0298e4eeecf3c3709fe2e0981c3ee6f7 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:de19730c88c98bf8b6ad98ee175f783d0298e4eeecf3c3709fe2e0981c3ee6f7 name: agent-image-12-0-33-7866-1 - - image: quay.io/mongodb/mongodb-agent@sha256:0b48d8201e132bb82dd615c471cea0eef9376db3329c309049442efd3c6ce2e7 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:0b48d8201e132bb82dd615c471cea0eef9376db3329c309049442efd3c6ce2e7 name: agent_image_107_0_15_8741_1_1_2_0 - - image: quay.io/mongodb/mongodb-agent@sha256:4d0a1403ec874b49004eb99ba0e077b01ee721932ebf8c172881dc9a8e03e295 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:4d0a1403ec874b49004eb99ba0e077b01ee721932ebf8c172881dc9a8e03e295 name: agent_image_108_0_0_8694_1 - - image: quay.io/mongodb/mongodb-agent@sha256:5236018c21a44cc0cb58151b80165ab7f51c390ae449123a99f59b217dd8893a + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:5236018c21a44cc0cb58151b80165ab7f51c390ae449123a99f59b217dd8893a name: agent_image_12_0_33_7866_1_1_1_0 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:172e338a43df01ee5408d8d1e3cb038d2c55e77761c1a2ac6ed37959d1525140 name: mongodb_image_4_4_6_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:73d39ecfa709fafafd7c32be17226989282b8637600320242ab1b76593538f6d + - image: 
quay.io/mongodb/mongodb-agent-ubi@sha256:73d39ecfa709fafafd7c32be17226989282b8637600320242ab1b76593538f6d name: agent_image_107_0_12_8669_1 - - image: quay.io/mongodb/mongodb-agent@sha256:0217cc9262b9981cc8903d646d26c7bcafd397abc0584839812a19264b1cf6b1 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:0217cc9262b9981cc8903d646d26c7bcafd397abc0584839812a19264b1cf6b1 name: agent_image_12_0_34_7888_1 - image: quay.io/mongodb/mongodb-enterprise-ops-manager-ubi@sha256:d663e12d43217ec28085c3471b47e0dba7ca730558de118f057e303c5d04922e name: ops_manager_image_repository_8_0_0 @@ -1641,15 +1641,15 @@ spec: name: mongodb_image_4_4_9_ubi8 - image: quay.io/mongodb/mongodb-enterprise-server@sha256:000af8d5dda39470ec167562a9880dca06dfe9dca9564ceaf74955b2527bc776 name: mongodb_image_8_0_0_ubi8 - - image: quay.io/mongodb/mongodb-agent@sha256:b8dfbb85a47c7647b96783819083e4aedf6aaadcdb7202beb174052356fa6b72 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:b8dfbb85a47c7647b96783819083e4aedf6aaadcdb7202beb174052356fa6b72 name: agent-image-107-0-13-8702-1 - - image: quay.io/mongodb/mongodb-agent@sha256:c540ecf01791899208227a4f3141f2253be50346ffbc6ec847da2d63db90241c + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:c540ecf01791899208227a4f3141f2253be50346ffbc6ec847da2d63db90241c name: agent-image-12-0-35-7911-1-1-0-1 - - image: quay.io/mongodb/mongodb-agent@sha256:73d39ecfa709fafafd7c32be17226989282b8637600320242ab1b76593538f6d + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:73d39ecfa709fafafd7c32be17226989282b8637600320242ab1b76593538f6d name: agent-image-107-0-12-8669-1 - - image: quay.io/mongodb/mongodb-agent@sha256:c9639810f9457ad79cbf880a9ddeaf3bad557e2022b206ad54852d0cfc4395fe + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:c9639810f9457ad79cbf880a9ddeaf3bad557e2022b206ad54852d0cfc4395fe name: agent_image_108_0_7_8810_1 - - image: quay.io/mongodb/mongodb-agent@sha256:f81b5a29542149241ea273946e1f78ffb75323653f3ee753902b26420b988252 + - image: quay.io/mongodb/mongodb-agent-ubi@sha256:f81b5a29542149241ea273946e1f78ffb75323653f3ee753902b26420b988252 name: agent_image_12_0_34_7888_1_1_0_1 replaces: mongodb-kubernetes.v1.1.0 version: 1.2.0 From 60c787566219ef54ed40681a28004c4afe51c984 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Wed, 20 Aug 2025 17:16:31 +0200 Subject: [PATCH 152/164] migrate ecr --- build_info.json | 2 +- scripts/dev/contexts/local-defaults-context | 2 +- scripts/release/tests/build_info_test.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/build_info.json b/build_info.json index e0507f387..a99288e61 100644 --- a/build_info.json +++ b/build_info.json @@ -250,7 +250,7 @@ "agent": { "dockerfile-path": "docker/mongodb-agent/Dockerfile.atomic", "patch": { - "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent-ubi", + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent", "platforms": [ "linux/amd64" ] diff --git a/scripts/dev/contexts/local-defaults-context b/scripts/dev/contexts/local-defaults-context index 36d652d55..7f05709d4 100644 --- a/scripts/dev/contexts/local-defaults-context +++ b/scripts/dev/contexts/local-defaults-context @@ -41,7 +41,7 @@ export MONGODB_ENTERPRISE_DATABASE_IMAGE="${INIT_IMAGES_REGISTRY}/mongodb-kubern export MDB_AGENT_IMAGE_OPERATOR_VERSION=latest export MDB_AGENT_IMAGE_REPOSITORY="${BASE_REPO_URL_SHARED}/mongodb-agent-ubi" export AGENT_BASE_REGISTRY=${BASE_REPO_URL_SHARED} -export AGENT_IMAGE="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent-ubi:12.0.30.7791-1" 
+export AGENT_IMAGE="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent:12.0.30.7791-1" # these are needed to deploy OM export INIT_APPDB_IMAGE_REPOSITORY="${INIT_IMAGES_REGISTRY}/mongodb-kubernetes-init-appdb" diff --git a/scripts/release/tests/build_info_test.py b/scripts/release/tests/build_info_test.py index 74e06523f..ffd0f1f88 100644 --- a/scripts/release/tests/build_info_test.py +++ b/scripts/release/tests/build_info_test.py @@ -88,7 +88,7 @@ def test_load_build_info_development(git_repo: Repo): sign=False, ), "agent": ImageInfo( - repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent-ubi", + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent", platforms=["linux/amd64"], version=version, dockerfile_path="docker/mongodb-agent/Dockerfile.atomic", @@ -201,7 +201,7 @@ def test_load_build_info_patch(git_repo: Repo): sign=False, ), "agent": ImageInfo( - repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent-ubi", + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent", platforms=["linux/amd64"], version=patch_id, dockerfile_path="docker/mongodb-agent/Dockerfile.atomic", From 9a349b98e29ee1bddf55e11c5df6d11994d41678 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Wed, 20 Aug 2025 17:18:38 +0200 Subject: [PATCH 153/164] migrate helm --- config/manager/manager.yaml | 40 +++++++++---------- .../output/0100_install_operator.out | 4 +- helm_chart/values.yaml | 4 +- .../output/0210_helm_install_operator.out | 4 +- public/mongodb-kubernetes-multi-cluster.yaml | 6 +-- public/mongodb-kubernetes-openshift.yaml | 40 +++++++++---------- public/mongodb-kubernetes.yaml | 6 +-- 7 files changed, 52 insertions(+), 52 deletions(-) diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index c93f3acb5..7e45c3445 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -87,9 +87,9 @@ spec: - name: OPS_MANAGER_IMAGE_PULL_POLICY value: Always - name: AGENT_IMAGE - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.2.8729-1" + value: "quay.io/mongodb/mongodb-agent:108.0.2.8729-1" - name: MDB_AGENT_IMAGE_REPOSITORY - value: "quay.io/mongodb/mongodb-agent-ubi" + value: "quay.io/mongodb/mongodb-agent" - name: MONGODB_IMAGE value: mongodb-enterprise-server - name: MONGODB_REPO_URL @@ -110,7 +110,7 @@ spec: value: mongodb-kubernetes-operator # Community Env Vars Start - name: MDB_COMMUNITY_AGENT_IMAGE - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.2.8729-1" + value: "quay.io/mongodb/mongodb-agent:108.0.2.8729-1" - name: VERSION_UPGRADE_HOOK_IMAGE value: "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.9" - name: READINESS_PROBE_IMAGE @@ -131,39 +131,39 @@ spec: - name: RELATED_IMAGE_INIT_APPDB_IMAGE_REPOSITORY_1_2_0 value: "quay.io/mongodb/mongodb-kubernetes-init-appdb:1.2.0" - name: RELATED_IMAGE_AGENT_IMAGE_107_0_11_8645_1 - value: "quay.io/mongodb/mongodb-agent-ubi:107.0.11.8645-1" + value: "quay.io/mongodb/mongodb-agent:107.0.11.8645-1" - name: RELATED_IMAGE_AGENT_IMAGE_107_0_12_8669_1 - value: "quay.io/mongodb/mongodb-agent-ubi:107.0.12.8669-1" + value: "quay.io/mongodb/mongodb-agent:107.0.12.8669-1" - name: RELATED_IMAGE_AGENT_IMAGE_107_0_13_8702_1 - value: "quay.io/mongodb/mongodb-agent-ubi:107.0.13.8702-1" + value: "quay.io/mongodb/mongodb-agent:107.0.13.8702-1" - name: RELATED_IMAGE_AGENT_IMAGE_107_0_15_8741_1 - value: "quay.io/mongodb/mongodb-agent-ubi:107.0.15.8741-1" + value: "quay.io/mongodb/mongodb-agent:107.0.15.8741-1" - name: 
RELATED_IMAGE_AGENT_IMAGE_107_0_17_8771_1 - value: "quay.io/mongodb/mongodb-agent-ubi:107.0.17.8771-1" + value: "quay.io/mongodb/mongodb-agent:107.0.17.8771-1" - name: RELATED_IMAGE_AGENT_IMAGE_108_0_0_8694_1 - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.0.8694-1" + value: "quay.io/mongodb/mongodb-agent:108.0.0.8694-1" - name: RELATED_IMAGE_AGENT_IMAGE_108_0_1_8718_1 - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.1.8718-1" + value: "quay.io/mongodb/mongodb-agent:108.0.1.8718-1" - name: RELATED_IMAGE_AGENT_IMAGE_108_0_11_8830_1 - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.11.8830-1" + value: "quay.io/mongodb/mongodb-agent:108.0.11.8830-1" - name: RELATED_IMAGE_AGENT_IMAGE_108_0_12_8846_1 - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.12.8846-1" + value: "quay.io/mongodb/mongodb-agent:108.0.12.8846-1" - name: RELATED_IMAGE_AGENT_IMAGE_108_0_2_8729_1 - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.2.8729-1" + value: "quay.io/mongodb/mongodb-agent:108.0.2.8729-1" - name: RELATED_IMAGE_AGENT_IMAGE_108_0_3_8758_1 - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.3.8758-1" + value: "quay.io/mongodb/mongodb-agent:108.0.3.8758-1" - name: RELATED_IMAGE_AGENT_IMAGE_108_0_4_8770_1 - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.4.8770-1" + value: "quay.io/mongodb/mongodb-agent:108.0.4.8770-1" - name: RELATED_IMAGE_AGENT_IMAGE_108_0_6_8796_1 - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.6.8796-1" + value: "quay.io/mongodb/mongodb-agent:108.0.6.8796-1" - name: RELATED_IMAGE_AGENT_IMAGE_108_0_7_8810_1 - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.7.8810-1" + value: "quay.io/mongodb/mongodb-agent:108.0.7.8810-1" - name: RELATED_IMAGE_AGENT_IMAGE_12_0_34_7888_1 - value: "quay.io/mongodb/mongodb-agent-ubi:12.0.34.7888-1" + value: "quay.io/mongodb/mongodb-agent:12.0.34.7888-1" - name: RELATED_IMAGE_AGENT_IMAGE_12_0_35_7911_1 - value: "quay.io/mongodb/mongodb-agent-ubi:12.0.35.7911-1" + value: "quay.io/mongodb/mongodb-agent:12.0.35.7911-1" - name: RELATED_IMAGE_AGENT_IMAGE_13_38_0_9654_1 - value: "quay.io/mongodb/mongodb-agent-ubi:13.38.0.9654-1" + value: "quay.io/mongodb/mongodb-agent:13.38.0.9654-1" - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_26 value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:6.0.26" - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_27 diff --git a/docs/community-search/quick-start/output/0100_install_operator.out b/docs/community-search/quick-start/output/0100_install_operator.out index 5ca0af2c3..c54b9c92e 100644 --- a/docs/community-search/quick-start/output/0100_install_operator.out +++ b/docs/community-search/quick-start/output/0100_install_operator.out @@ -10,11 +10,11 @@ dummy: value COMPUTED VALUES: agent: - name: mongodb-agent-ubi + name: mongodb-agent version: 108.0.2.8729-1 community: agent: - name: mongodb-agent-ubi + name: mongodb-agent version: 108.0.2.8729-1 mongodb: imageType: ubi8 diff --git a/helm_chart/values.yaml b/helm_chart/values.yaml index 0a44d5ca0..30d9ecb69 100644 --- a/helm_chart/values.yaml +++ b/helm_chart/values.yaml @@ -143,7 +143,7 @@ initAppDb: version: 1.2.0 agent: - name: mongodb-agent-ubi + name: mongodb-agent version: 108.0.2.8729-1 # This is only used by the MongoDBCommunity resource reconciler - START @@ -196,7 +196,7 @@ community: name: mongodb-community-server imageType: ubi8 agent: - name: mongodb-agent-ubi + name: mongodb-agent version: 108.0.2.8729-1 registry: agent: quay.io/mongodb diff --git a/public/architectures/setup-multi-cluster/setup-operator/output/0210_helm_install_operator.out 
b/public/architectures/setup-multi-cluster/setup-operator/output/0210_helm_install_operator.out index 20f59b746..7627b0a11 100644 --- a/public/architectures/setup-multi-cluster/setup-operator/output/0210_helm_install_operator.out +++ b/public/architectures/setup-multi-cluster/setup-operator/output/0210_helm_install_operator.out @@ -23,11 +23,11 @@ operator: COMPUTED VALUES: agent: - name: mongodb-agent-ubi + name: mongodb-agent version: 108.0.2.8729-1 community: agent: - name: mongodb-agent-ubi + name: mongodb-agent version: 108.0.2.8729-1 mongodb: imageType: ubi8 diff --git a/public/mongodb-kubernetes-multi-cluster.yaml b/public/mongodb-kubernetes-multi-cluster.yaml index f53630a0e..2f4099ab8 100644 --- a/public/mongodb-kubernetes-multi-cluster.yaml +++ b/public/mongodb-kubernetes-multi-cluster.yaml @@ -398,9 +398,9 @@ spec: - name: OPS_MANAGER_IMAGE_PULL_POLICY value: Always - name: AGENT_IMAGE - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.2.8729-1" + value: "quay.io/mongodb/mongodb-agent:108.0.2.8729-1" - name: MDB_AGENT_IMAGE_REPOSITORY - value: "quay.io/mongodb/mongodb-agent-ubi" + value: "quay.io/mongodb/mongodb-agent" - name: MONGODB_IMAGE value: mongodb-enterprise-server - name: MONGODB_REPO_URL @@ -419,7 +419,7 @@ spec: value: mongodb-kubernetes-operator-multi-cluster # Community Env Vars Start - name: MDB_COMMUNITY_AGENT_IMAGE - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.2.8729-1" + value: "quay.io/mongodb/mongodb-agent:108.0.2.8729-1" - name: VERSION_UPGRADE_HOOK_IMAGE value: "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.9" - name: READINESS_PROBE_IMAGE diff --git a/public/mongodb-kubernetes-openshift.yaml b/public/mongodb-kubernetes-openshift.yaml index fda4f6729..480bfb38e 100644 --- a/public/mongodb-kubernetes-openshift.yaml +++ b/public/mongodb-kubernetes-openshift.yaml @@ -393,9 +393,9 @@ spec: - name: OPS_MANAGER_IMAGE_PULL_POLICY value: Always - name: AGENT_IMAGE - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.2.8729-1" + value: "quay.io/mongodb/mongodb-agent:108.0.2.8729-1" - name: MDB_AGENT_IMAGE_REPOSITORY - value: "quay.io/mongodb/mongodb-agent-ubi" + value: "quay.io/mongodb/mongodb-agent" - name: MONGODB_IMAGE value: mongodb-enterprise-server - name: MONGODB_REPO_URL @@ -414,7 +414,7 @@ spec: value: mongodb-kubernetes-operator # Community Env Vars Start - name: MDB_COMMUNITY_AGENT_IMAGE - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.2.8729-1" + value: "quay.io/mongodb/mongodb-agent:108.0.2.8729-1" - name: VERSION_UPGRADE_HOOK_IMAGE value: "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.9" - name: READINESS_PROBE_IMAGE @@ -435,39 +435,39 @@ spec: - name: RELATED_IMAGE_INIT_APPDB_IMAGE_REPOSITORY_1_2_0 value: "quay.io/mongodb/mongodb-kubernetes-init-appdb:1.2.0" - name: RELATED_IMAGE_AGENT_IMAGE_107_0_11_8645_1 - value: "quay.io/mongodb/mongodb-agent-ubi:107.0.11.8645-1" + value: "quay.io/mongodb/mongodb-agent:107.0.11.8645-1" - name: RELATED_IMAGE_AGENT_IMAGE_107_0_12_8669_1 - value: "quay.io/mongodb/mongodb-agent-ubi:107.0.12.8669-1" + value: "quay.io/mongodb/mongodb-agent:107.0.12.8669-1" - name: RELATED_IMAGE_AGENT_IMAGE_107_0_13_8702_1 - value: "quay.io/mongodb/mongodb-agent-ubi:107.0.13.8702-1" + value: "quay.io/mongodb/mongodb-agent:107.0.13.8702-1" - name: RELATED_IMAGE_AGENT_IMAGE_107_0_15_8741_1 - value: "quay.io/mongodb/mongodb-agent-ubi:107.0.15.8741-1" + value: "quay.io/mongodb/mongodb-agent:107.0.15.8741-1" - name: RELATED_IMAGE_AGENT_IMAGE_107_0_17_8771_1 - value: 
"quay.io/mongodb/mongodb-agent-ubi:107.0.17.8771-1" + value: "quay.io/mongodb/mongodb-agent:107.0.17.8771-1" - name: RELATED_IMAGE_AGENT_IMAGE_108_0_0_8694_1 - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.0.8694-1" + value: "quay.io/mongodb/mongodb-agent:108.0.0.8694-1" - name: RELATED_IMAGE_AGENT_IMAGE_108_0_1_8718_1 - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.1.8718-1" + value: "quay.io/mongodb/mongodb-agent:108.0.1.8718-1" - name: RELATED_IMAGE_AGENT_IMAGE_108_0_11_8830_1 - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.11.8830-1" + value: "quay.io/mongodb/mongodb-agent:108.0.11.8830-1" - name: RELATED_IMAGE_AGENT_IMAGE_108_0_12_8846_1 - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.12.8846-1" + value: "quay.io/mongodb/mongodb-agent:108.0.12.8846-1" - name: RELATED_IMAGE_AGENT_IMAGE_108_0_2_8729_1 - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.2.8729-1" + value: "quay.io/mongodb/mongodb-agent:108.0.2.8729-1" - name: RELATED_IMAGE_AGENT_IMAGE_108_0_3_8758_1 - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.3.8758-1" + value: "quay.io/mongodb/mongodb-agent:108.0.3.8758-1" - name: RELATED_IMAGE_AGENT_IMAGE_108_0_4_8770_1 - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.4.8770-1" + value: "quay.io/mongodb/mongodb-agent:108.0.4.8770-1" - name: RELATED_IMAGE_AGENT_IMAGE_108_0_6_8796_1 - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.6.8796-1" + value: "quay.io/mongodb/mongodb-agent:108.0.6.8796-1" - name: RELATED_IMAGE_AGENT_IMAGE_108_0_7_8810_1 - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.7.8810-1" + value: "quay.io/mongodb/mongodb-agent:108.0.7.8810-1" - name: RELATED_IMAGE_AGENT_IMAGE_12_0_34_7888_1 - value: "quay.io/mongodb/mongodb-agent-ubi:12.0.34.7888-1" + value: "quay.io/mongodb/mongodb-agent:12.0.34.7888-1" - name: RELATED_IMAGE_AGENT_IMAGE_12_0_35_7911_1 - value: "quay.io/mongodb/mongodb-agent-ubi:12.0.35.7911-1" + value: "quay.io/mongodb/mongodb-agent:12.0.35.7911-1" - name: RELATED_IMAGE_AGENT_IMAGE_13_38_0_9654_1 - value: "quay.io/mongodb/mongodb-agent-ubi:13.38.0.9654-1" + value: "quay.io/mongodb/mongodb-agent:13.38.0.9654-1" - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_26 value: "quay.io/mongodb/mongodb-enterprise-ops-manager-ubi:6.0.26" - name: RELATED_IMAGE_OPS_MANAGER_IMAGE_REPOSITORY_6_0_27 diff --git a/public/mongodb-kubernetes.yaml b/public/mongodb-kubernetes.yaml index 69ff3f0e8..ed6024f98 100644 --- a/public/mongodb-kubernetes.yaml +++ b/public/mongodb-kubernetes.yaml @@ -394,9 +394,9 @@ spec: - name: OPS_MANAGER_IMAGE_PULL_POLICY value: Always - name: AGENT_IMAGE - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.2.8729-1" + value: "quay.io/mongodb/mongodb-agent:108.0.2.8729-1" - name: MDB_AGENT_IMAGE_REPOSITORY - value: "quay.io/mongodb/mongodb-agent-ubi" + value: "quay.io/mongodb/mongodb-agent" - name: MONGODB_IMAGE value: mongodb-enterprise-server - name: MONGODB_REPO_URL @@ -415,7 +415,7 @@ spec: value: mongodb-kubernetes-operator # Community Env Vars Start - name: MDB_COMMUNITY_AGENT_IMAGE - value: "quay.io/mongodb/mongodb-agent-ubi:108.0.2.8729-1" + value: "quay.io/mongodb/mongodb-agent:108.0.2.8729-1" - name: VERSION_UPGRADE_HOOK_IMAGE value: "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook:1.0.9" - name: READINESS_PROBE_IMAGE From f7417c06d2b237d82896301e200f717d07cdae89 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Wed, 20 Aug 2025 17:22:20 +0200 Subject: [PATCH 154/164] add release notes --- changelog/20250820_feature_multi_arch_support.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) 
diff --git a/changelog/20250820_feature_multi_arch_support.md b/changelog/20250820_feature_multi_arch_support.md index 9f48dbe10..831f7b861 100644 --- a/changelog/20250820_feature_multi_arch_support.md +++ b/changelog/20250820_feature_multi_arch_support.md @@ -5,5 +5,8 @@ date: 2025-08-20 --- # Multi-Architecture Support -We've added comprehensive multi-architecture support for the kubernetes operator. This enhancement enables deployment on IBM Power (ppc64le) and IBM Z (s390x) architectures alongside +* We've added comprehensive multi-architecture support for the Kubernetes operator. This enhancement enables deployment on IBM Power (ppc64le) and IBM Z (s390x) architectures alongside existing x86_64 support. All core images (operator, agent, init containers, database, readiness probe) now support multiple architectures + +# Helm Charts +* We've migrated the default repository from mongodb/mongodb-agent-ubi to mongodb/mongodb-agent. We've also rebuilt the agent images and migrated them to the new repository with multi-architecture support. From 3b3985974a62bf27e56cccc8a3915a59e6ed896b Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Thu, 21 Aug 2025 11:09:19 +0200 Subject: [PATCH 155/164] remove not used agent --- release.json | 4 ---- scripts/dev/contexts/variables/om60 | 3 --- 2 files changed, 7 deletions(-) diff --git a/release.json b/release.json index 818737f55..7c27e42bf 100644 --- a/release.json +++ b/release.json @@ -109,10 +109,6 @@ "cloud_manager": "13.38.0.9654-1", "cloud_manager_tools": "100.12.2", "ops_manager": { - "6.0.26": { - "agent_version": "12.0.34.7888-1", - "tools_version": "100.10.0" - }, "6.0.27": { "agent_version": "12.0.35.7911-1", "tools_version": "100.10.0" diff --git a/scripts/dev/contexts/variables/om60 b/scripts/dev/contexts/variables/om60 index be1544ee4..a9df4cdcc 100644 --- a/scripts/dev/contexts/variables/om60 +++ b/scripts/dev/contexts/variables/om60 @@ -13,9 +13,6 @@ export CUSTOM_OM_VERSION export CUSTOM_MDB_VERSION=6.0.21 export CUSTOM_MDB_PREV_VERSION=5.0.7 -export AGENT_VERSION=12.0.33.7866-1 -export AGENT_IMAGE="${MDB_AGENT_IMAGE_REPOSITORY}:${AGENT_VERSION}" - export CUSTOM_APPDB_VERSION=6.0.21-ent export TEST_MODE=opsmanager export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev From 33e7ee05dd6a65ee286302275da054b25e69f8dc Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Mon, 25 Aug 2025 17:53:57 +0200 Subject: [PATCH 156/164] fix merge --- build_info.json | 30 +- scripts/dev/prepare_local_e2e_run.sh | 5 - scripts/evergreen/e2e/build_e2e_image_ibm.sh | 16 -- scripts/minikube/install-minikube.sh | 56 ---- scripts/minikube/minikube_host.sh | 213 --------------- scripts/minikube/setup_minikube_host.sh | 256 ------------------ .../agent/detect_ops_manager_changes.py | 35 --- scripts/release/tests/release_info_test.py | 4 +- 8 files changed, 13 insertions(+), 602 deletions(-) delete mode 100755 scripts/evergreen/e2e/build_e2e_image_ibm.sh delete mode 100755 scripts/minikube/install-minikube.sh delete mode 100755 scripts/minikube/minikube_host.sh delete mode 100755 scripts/minikube/setup_minikube_host.sh diff --git a/build_info.json b/build_info.json index 380f4bc6a..e77ffe414 100644 --- a/build_info.json +++ b/build_info.json @@ -4,7 +4,7 @@ "dockerfile-path": "docker/mongodb-kubernetes-operator/Dockerfile.atomic", "patch": { "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes"], - "platforms": [ + "platforms": [ "linux/amd64" ] }, @@ -49,7 +49,7 @@ "dockerfile-path": 
"docker/mongodb-kubernetes-init-database/Dockerfile.atomic", "patch": { "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-database"], - "platforms": [ + "platforms": [ "linux/amd64" ] }, @@ -78,7 +78,7 @@ "dockerfile-path": "docker/mongodb-kubernetes-init-appdb/Dockerfile.atomic", "patch": { "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-appdb"], - "platforms": [ + "platforms": [ "linux/amd64" ] }, @@ -107,7 +107,7 @@ "dockerfile-path": "docker/mongodb-kubernetes-init-ops-manager/Dockerfile.atomic", "patch": { "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-ops-manager"], - "platforms": [ + "platforms": [ "linux/amd64" ] }, @@ -130,7 +130,7 @@ "dockerfile-path": "docker/mongodb-kubernetes-database/Dockerfile.atomic", "patch": { "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-database"], - "platforms": [ + "platforms": [ "linux/amd64" ] }, @@ -190,7 +190,7 @@ "dockerfile-path": "docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic", "patch": { "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-readinessprobe"], - "platforms": [ + "platforms": [ "linux/amd64" ] }, @@ -199,9 +199,7 @@ "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-readinessprobe"], "platforms": [ "linux/arm64", - "linux/amd64", - "linux/s390x", - "linux/ppc64le" + "linux/amd64" ] }, "release": { @@ -212,9 +210,7 @@ ], "platforms": [ "linux/arm64", - "linux/amd64", - "linux/s390x", - "linux/ppc64le" + "linux/amd64" ] } }, @@ -222,7 +218,7 @@ "dockerfile-path": "docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic", "patch": { "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-operator-version-upgrade-post-start-hook"], - "platforms": [ + "platforms": [ "linux/amd64" ] }, @@ -231,9 +227,7 @@ "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-operator-version-upgrade-post-start-hook"], "platforms": [ "linux/arm64", - "linux/amd64", - "linux/s390x", - "linux/ppc64le" + "linux/amd64" ] }, "release": { @@ -242,9 +236,7 @@ "repositories": ["quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook"], "platforms": [ "linux/arm64", - "linux/amd64", - "linux/s390x", - "linux/ppc64le" + "linux/amd64" ] } }, diff --git a/scripts/dev/prepare_local_e2e_run.sh b/scripts/dev/prepare_local_e2e_run.sh index e6b1b9bcd..b711ef83a 100755 --- a/scripts/dev/prepare_local_e2e_run.sh +++ b/scripts/dev/prepare_local_e2e_run.sh @@ -60,11 +60,6 @@ prepare_operator_config_map "$(kubectl config current-context)" 2>&1 | prepend " rm -rf docker/mongodb-kubernetes-tests/helm_chart cp -rf helm_chart docker/mongodb-kubernetes-tests/helm_chart -rm -rf docker/mongodb-kubernetes-tests/public -cp -rf public docker/mongodb-kubernetes-tests/public -cp release.json docker/mongodb-kubernetes-tests/release.json -cp requirements.txt docker/mongodb-kubernetes-tests/requirements.txt - # shellcheck disable=SC2154 if [[ "${KUBE_ENVIRONMENT_NAME}" == "multi" ]]; then prepare_multi_cluster_e2e_run 2>&1 | prepend "prepare_multi_cluster_e2e_run" diff --git a/scripts/evergreen/e2e/build_e2e_image_ibm.sh b/scripts/evergreen/e2e/build_e2e_image_ibm.sh deleted file mode 100755 index 637c83357..000000000 --- a/scripts/evergreen/e2e/build_e2e_image_ibm.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash -source scripts/dev/set_env_context.sh 
- -# we need to use podman here and a special script as ibm machines don't have docker - -cp -rf public docker/mongodb-kubernetes-tests/public -cp release.json docker/mongodb-kubernetes-tests/release.json -cp requirements.txt docker/mongodb-kubernetes-tests/requirements.txt -cp -rf helm_chart docker/mongodb-kubernetes-tests/helm_chart - -echo "Building mongodb-kubernetes-tests image with tag: ${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}" -cd docker/mongodb-kubernetes-tests -sudo podman buildx build --progress plain . -f Dockerfile -t "${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}-$(arch)" --build-arg PYTHON_VERSION="${PYTHON_VERSION}" -sudo podman push --authfile="/root/.config/containers/auth.json" "${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}-$(arch)" - -# docker buildx imagetools create "${BASE_REPO_URL}mongodb-kubernetes-tests:${version_id}" --append "${BASE_REPO_URL}mongodb-kubernetes-tests:${version_id}-$(arch)" -t "${BASE_REPO_URL}mongodb-kubernetes-tests:${version_id}" diff --git a/scripts/minikube/install-minikube.sh b/scripts/minikube/install-minikube.sh deleted file mode 100755 index cacfd7618..000000000 --- a/scripts/minikube/install-minikube.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env bash -set -Eeou pipefail - -source scripts/dev/set_env_context.sh -source scripts/funcs/install - -# Detect architecture -ARCH=$(detect_architecture) - -echo "Installing minikube on ${ARCH} architecture..." - -# Install crictl (container runtime CLI) -echo "Installing crictl for ${ARCH}..." -CRICTL_VERSION=$(curl -s https://api.github.com/repos/kubernetes-sigs/cri-tools/releases/latest | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/') - -# Download and extract crictl tar.gz -mkdir -p "${PROJECT_DIR:-.}/bin" -CRICTL_URL="https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-${ARCH}.tar.gz" -echo "Downloading ${CRICTL_URL}" -TEMP_DIR=$(mktemp -d) -curl --retry 3 --silent -L "${CRICTL_URL}" -o "${TEMP_DIR}/crictl.tar.gz" -tar -xzf "${TEMP_DIR}/crictl.tar.gz" -C "${TEMP_DIR}/" -chmod +x "${TEMP_DIR}/crictl" -mv "${TEMP_DIR}/crictl" "${PROJECT_DIR:-.}/bin/crictl" -rm -rf "${TEMP_DIR}" -echo "Installed crictl to ${PROJECT_DIR:-.}/bin" - -# Also install crictl system-wide so minikube can find it -echo "Installing crictl system-wide..." -if [[ -f "${PROJECT_DIR:-.}/bin/crictl" ]]; then - # Install to both /usr/local/bin and /usr/bin for better PATH coverage - sudo cp "${PROJECT_DIR:-.}/bin/crictl" /usr/local/bin/crictl - sudo cp "${PROJECT_DIR:-.}/bin/crictl" /usr/bin/crictl - sudo chmod +x /usr/local/bin/crictl - sudo chmod +x /usr/bin/crictl - echo "✅ crictl installed to /usr/local/bin/ and /usr/bin/" - - # Verify installation - if command -v crictl >/dev/null 2>&1; then - echo "✅ crictl is now available in PATH: $(which crictl)" - echo "✅ crictl version: $(crictl --version 2>/dev/null || echo 'version check failed')" - else - echo "⚠️ crictl installed but not found in PATH" - fi -else - echo "⚠️ crictl not found in project bin, minikube may have issues" -fi - -# Install minikube -echo "Installing minikube for ${ARCH}..." 
-MINIKUBE_VERSION=$(curl -s https://api.github.com/repos/kubernetes/minikube/releases/latest | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/') - -# Download minikube for detected architecture -download_and_install_binary "${PROJECT_DIR:-.}/bin" minikube "https://github.com/kubernetes/minikube/releases/download/${MINIKUBE_VERSION}/minikube-linux-${ARCH}" - -echo "Crictl ${CRICTL_VERSION} and Minikube ${MINIKUBE_VERSION} installed successfully for ${ARCH}" diff --git a/scripts/minikube/minikube_host.sh b/scripts/minikube/minikube_host.sh deleted file mode 100755 index 971587ec3..000000000 --- a/scripts/minikube/minikube_host.sh +++ /dev/null @@ -1,213 +0,0 @@ -#!/usr/bin/env bash - -# This is a helper script for running tests on s390x Hosts. -# It allows to configure minikube clusters and expose remote API servers on a local machine to -# enable local development while running minikube cluster on s390x instance. -# Run "minikube_host.sh help" command to see the full usage. -# Similar to evg_host.sh but uses minikube instead of kind. - -set -Eeou pipefail - -test "${MDB_BASH_DEBUG:-0}" -eq 1 && set -x - -source scripts/dev/set_env_context.sh -source scripts/funcs/printing -source scripts/funcs/install - -if [[ -z "${MINIKUBE_HOST_NAME}" ]]; then - echo "MINIKUBE_HOST_NAME env var is missing" - echo "Set it to your s390x host connection string (e.g., user@hostname)" - exit 1 -fi - -get_host_url() { - echo "${MINIKUBE_HOST_NAME}" -} - -cmd=${1-""} - -if [[ "${cmd}" != "" && "${cmd}" != "help" ]]; then - host_url=$(get_host_url) -fi - -kubeconfig_path="${HOME}/.operator-dev/minikube-host.kubeconfig" - -configure() { - ssh -T -q "${host_url}" "sudo chown \$(whoami):\$(whoami) ~/.docker || true; mkdir -p ~/.docker" - if [[ -f "${HOME}/.docker/config.json" ]]; then - echo "Copying local ~/.docker/config.json authorization credentials to s390x host" - jq '. | with_entries(select(.key == "auths"))' "${HOME}/.docker/config.json" | ssh -T -q "${host_url}" 'cat > ~/.docker/config.json' - fi - - sync - - ssh -T -q "${host_url}" "cd ~/mongodb-kubernetes; scripts/dev/switch_context.sh root-context; scripts/minikube/setup_minikube_host.sh " -} - -sync() { - rsync --verbose --archive --compress --human-readable --recursive --progress \ - --delete --delete-excluded --max-size=1000000 --prune-empty-dirs \ - -e ssh \ - --include-from=.rsyncinclude \ - --exclude-from=.gitignore \ - --exclude-from=.rsyncignore \ - ./ "${host_url}:~/mongodb-kubernetes/" - - rsync --verbose --no-links --recursive --prune-empty-dirs --archive --compress --human-readable \ - --max-size=1000000 \ - -e ssh \ - ~/.operator-dev/ \ - "${host_url}:~/.operator-dev" & - - wait -} - -remote-prepare-local-e2e-run() { - set -x - sync - cmd make switch context=e2e_mdb_kind_ubi_cloudqa - cmd scripts/dev/prepare_local_e2e_run.sh - rsync --verbose --no-links --recursive --prune-empty-dirs --archive --compress --human-readable \ - --max-size=1000000 \ - -e ssh \ - "${host_url}:~/mongodb-kubernetes/.multi_cluster_local_test_files" \ - ./ & - scp "${host_url}:~/.operator-dev/multicluster_kubeconfig" "${KUBE_CONFIG_PATH}" & - - wait -} - -get-kubeconfig() { - # For minikube, we need to get the kubeconfig and certificates - echo "Getting kubeconfig from minikube on s390x host..." - - # Create local minikube directory structure - mkdir -p "${HOME}/.minikube" - - # Copy certificates from remote host - echo "Copying minikube certificates..." 
- scp "${host_url}:~/.minikube/ca.crt" "${HOME}/.minikube/" - scp "${host_url}:~/.minikube/client.crt" "${HOME}/.minikube/" - scp "${host_url}:~/.minikube/client.key" "${HOME}/.minikube/" - - # Get kubeconfig and update paths to local ones - ssh -T -q "${host_url}" "cd ~/mongodb-kubernetes; export KUBE_ENVIRONMENT_NAME=minikube; kubectl config view --raw" > "${kubeconfig_path}" - - # Update certificate paths to local paths - sed -i '' "s|/home/cloud-user/.minikube|${HOME}/.minikube|g" "${kubeconfig_path}" - - # Update server addresses to use localhost for tunneling - sed -i '' "s|https://192.168.[0-9]*.[0-9]*:\([0-9]*\)|https://127.0.0.1:\1|g" "${kubeconfig_path}" - - echo "Copied minikube kubeconfig and certificates to ${kubeconfig_path}" -} - -recreate-minikube-cluster() { - configure "$(detect_architecture)" 2>&1| prepend "minikube_host.sh configure" - echo "Recreating minikube cluster on ${MINIKUBE_HOST_NAME} (${host_url})..." - # shellcheck disable=SC2088 - ssh -T "${host_url}" "cd ~/mongodb-kubernetes; export KUBE_ENVIRONMENT_NAME=minikube; minikube delete || true; ./scripts/minikube/setup_minikube_host.sh" - echo "Copying kubeconfig to ${kubeconfig_path}" - get-kubeconfig -} - -tunnel() { - shift 1 - echo "Setting up tunnel for minikube cluster..." - - # Get the minikube API server port from remote host - local api_port - api_port=$(ssh -T -q "${host_url}" "cd ~/mongodb-kubernetes; minikube ip 2>/dev/null && echo ':8443' | tr -d '\n'") - - if [[ -z "${api_port}" ]]; then - echo "Could not determine minikube API server details. Is the cluster running?" - return 1 - fi - - # Extract just the port (8443) - local port="8443" - echo "Forwarding localhost:${port} to minikube cluster API server" - - # Forward the API server port through minikube - set -x - # shellcheck disable=SC2029 - ssh -L "${port}:$(ssh -T -q "${host_url}" "minikube ip"):${port}" "${host_url}" "$@" - set +x -} - -retry_with_sleep() { - shift 1 - cmd=$1 - local sleep_time - sleep_time=5 - - while true; do - ${cmd} || true - echo "Retrying command after ${sleep_time} of sleep: ${cmd}" - sleep 5; - done -} - -ssh_to_host() { - shift 1 - # shellcheck disable=SC2029 - ssh "$@" "${host_url}" -} - -upload-my-ssh-private-key() { - ssh -T -q "${host_url}" "mkdir -p ~/.ssh" - scp "${HOME}/.ssh/id_rsa" "${host_url}:~/.ssh/id_rsa" - scp "${HOME}/.ssh/id_rsa.pub" "${host_url}:~/.ssh/id_rsa.pub" - ssh -T -q "${host_url}" "chmod 700 ~/.ssh && chown -R \$(whoami):\$(whoami) ~/.ssh" -} - -cmd() { - if [[ "$1" == "cmd" ]]; then - shift 1 - fi - - cmd="cd ~/mongodb-kubernetes; $*" - ssh -T -q "${host_url}" "${cmd}" -} - -usage() { - echo "USAGE: - minikube_host.sh - -PREREQUISITES: - - s390x host with SSH access - - define MINIKUBE_HOST_NAME env var (e.g., export MINIKUBE_HOST_NAME=user@hostname) - - SSH key-based authentication configured - -COMMANDS: - configure installs on a host: calls sync, switches context, installs necessary software (auto-detects arch) - sync rsync of project directory - recreate-minikube-cluster recreates minikube cluster and executes get-kubeconfig - remote-prepare-local-e2e-run executes prepare-local-e2e on the remote host - get-kubeconfig copies remote minikube kubeconfig locally to ~/.operator-dev/minikube-host.kubeconfig - tunnel [args] creates ssh session with tunneling to all API servers - ssh [args] creates ssh session passing optional arguments to ssh - cmd [command with args] execute command as if being on s390x host - upload-my-ssh-private-key uploads your ssh keys (~/.ssh/id_rsa) to s390x host - help 
this message - -EXAMPLES: - export MINIKUBE_HOST_NAME=user@ibmz8 - minikube_host.sh tunnel - minikube_host.sh cmd 'make e2e test=replica_set' -" -} - -case ${cmd} in -configure) configure "$@" ;; -recreate-minikube-cluster) recreate-minikube-cluster "$@" ;; -get-kubeconfig) get-kubeconfig ;; -remote-prepare-local-e2e-run) remote-prepare-local-e2e-run ;; -ssh) ssh_to_host "$@" ;; -tunnel) retry_with_sleep tunnel "$@" ;; -sync) sync ;; -cmd) cmd "$@" ;; -upload-my-ssh-private-key) upload-my-ssh-private-key ;; -help) usage ;; -*) usage ;; -esac diff --git a/scripts/minikube/setup_minikube_host.sh b/scripts/minikube/setup_minikube_host.sh deleted file mode 100755 index 1d2618c32..000000000 --- a/scripts/minikube/setup_minikube_host.sh +++ /dev/null @@ -1,256 +0,0 @@ -#!/usr/bin/env bash - -# this script downloads necessary tooling for alternative architectures (s390x, ppc64le) using minikube (similar to setup_evg_host.sh) -source scripts/dev/set_env_context.sh -source scripts/funcs/install - -set -Eeou pipefail - -set_limits() { - echo "Increasing fs.inotify.max_user_instances" - sudo sysctl -w fs.inotify.max_user_instances=8192 - - echo "Increasing fs.inotify.max_user_watches" - sudo sysctl -w fs.inotify.max_user_watches=10485760 - - echo "Increasing the number of open files" - nofile_max=$(cat /proc/sys/fs/nr_open) - nproc_max=$(ulimit -u) - sudo tee -a /etc/security/limits.conf <>> Setting up local registry and custom kicbase image for ppc64le..." - - # Check if local registry is running (with fallback for namespace issues) - registry_running=false - if curl -s http://localhost:5000/v2/_catalog >/dev/null 2>&1; then - echo "Registry detected via HTTP check (podman ps failed)" - registry_running=true - fi - - if ! $registry_running; then - echo "Starting local container registry on port 5000..." - - # Clean up any existing registry first - sudo podman rm -f registry 2>/dev/null || true - - if ! sudo podman run -d -p 5000:5000 --name registry --restart=always docker.io/library/registry:2; then - echo "❌ Failed to start local registry - trying alternative approach" - exit 1 - fi - - # Wait for registry to be ready - echo "Waiting for registry to be ready..." - for i in {1..30}; do - if curl -s http://localhost:5000/v2/_catalog >/dev/null 2>&1; then - break - fi - sleep 1 - done - else - echo "✅ Local registry already running" - fi - - # Configure podman to trust local registry (both user and root level for minikube) - echo "Configuring registries.conf to trust local registry..." - - # User-level config - mkdir -p ~/.config/containers - cat > ~/.config/containers/registries.conf << 'EOF' -[[registry]] -location = "localhost:5000" -insecure = true -EOF - - # Root-level config (since minikube uses sudo podman) - sudo mkdir -p /root/.config/containers - sudo tee /root/.config/containers/registries.conf << 'EOF' >/dev/null -[[registry]] -location = "localhost:5000" -insecure = true -EOF - - echo "✅ Registry configuration created for both user and root" - custom_image_tag="localhost:5000/kicbase:v0.0.47" - - # Determine image tag - custom_image_tag="localhost:5000/kicbase:v0.0.47" - if curl -s http://localhost:5000/v2/kicbase/tags/list | grep -q "v0.0.47"; then - echo "Custom kicbase image already exists in local registry" - return 0 - fi - - # Build custom kicbase image with crictl - echo "Building custom kicbase image with crictl for ppc64le..." 
- - # Build custom kicbase image - mkdir -p "${PROJECT_DIR:-.}/scripts/minikube/kicbase" - cat > "${PROJECT_DIR:-.}/scripts/minikube/kicbase/Dockerfile" << 'EOF' -FROM gcr.io/k8s-minikube/kicbase:v0.0.47 -RUN if [ "$(uname -m)" = "ppc64le" ]; then \ - CRICTL_VERSION="v1.28.0" && \ - curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-ppc64le.tar.gz" \ - -o /tmp/crictl.tar.gz && \ - tar -C /usr/bin -xzf /tmp/crictl.tar.gz && \ - chmod +x /usr/bin/crictl && \ - rm /tmp/crictl.tar.gz; \ - fi -EOF - - cd "${PROJECT_DIR:-.}/scripts/minikube/kicbase" - sudo podman build -t "${custom_image_tag}" . || { - echo "Failed to build custom image" - return 1 - } - sudo podman push "${custom_image_tag}" --tls-verify=false || { - echo "Failed to push to registry" - return 1 - } - cd - >/dev/null - echo "Custom kicbase image ready: ${custom_image_tag}" - fi - return 0 -} - -# Start minikube with podman driver -start_minikube_cluster() { - echo ">>> Starting minikube cluster with podman driver..." - - # Clean up any existing minikube state to avoid cached configuration issues - echo "Cleaning up any existing minikube state..." - if [[ -d ~/.minikube/machines/minikube ]]; then - echo "Removing ~/.minikube/machines/minikube directory..." - rm -rf ~/.minikube/machines/minikube - fi - - echo "Ensuring clean minikube state..." - "${PROJECT_DIR:-.}/bin/minikube" delete 2>/dev/null || true - - local start_args=("--driver=podman") - - if [[ "${ARCH}" == "ppc64le" ]]; then - echo "Using custom kicbase image for ppc64le with crictl..." - - start_args+=("--base-image=localhost:5000/kicbase:v0.0.47") - start_args+=("--insecure-registry=localhost:5000") - fi - - # Use default bridge CNI to avoid Docker Hub rate limiting issues - # start_args+=("--cni=bridge") - - echo "Starting minikube with args: ${start_args[*]}" - if "${PROJECT_DIR:-.}/bin/minikube" start "${start_args[@]}"; then - echo "✅ Minikube started successfully" - else - echo "❌ Minikube failed to start" - echo "Minikube logs:" - "${PROJECT_DIR:-.}/bin/minikube" logs | tail -20 - return 1 - fi -} - -setup_podman() { - echo "Setting up podman for ${ARCH}..." - - # Check if podman is already available - if command -v podman &> /dev/null; then - echo "✅ Podman already installed" - - # Diagnose podman state - echo "=== Podman Diagnostics ===" - echo "User: $(whoami), UID: $(id -u)" - echo "User namespace support: $(cat /proc/self/uid_map 2>/dev/null || echo 'not available')" - echo "Systemctl user status:" - systemctl --user status podman.socket 2>/dev/null || echo "podman.socket not active" - echo "Running 'sudo podman info' command..." - sudo podman info 2>&1 - fi - - - # Configure podman to use cgroupfs instead of systemd in CI - mkdir -p ~/.config/containers - cat > ~/.config/containers/containers.conf << EOF -[containers] -cgroup_manager = "cgroupfs" -events_logger = "file" - -[engine] -cgroup_manager = "cgroupfs" -EOF - -} - -# Setup podman and container runtime -setup_podman -set_limits -download_minikube - -# Setup local registry and custom kicbase image for ppc64le if needed -setup_local_registry_and_custom_image - -echo "" -echo ">>> Verifying minikube installation..." 
-if command -v minikube &> /dev/null; then - minikube_version=$(minikube version --short 2>/dev/null || minikube version 2>/dev/null | head -n1) - echo "✅ Minikube installed successfully: ${minikube_version}" -else - echo "❌ Minikube installation failed - minikube command not found" - echo "Please check the installation logs above for errors" - exit 1 -fi - -if [[ "${ARCH}" == "ppc64le" ]]; then - echo "" - echo ">>> Note: crictl will be patched into the minikube container after startup" -else - echo "" - echo ">>> Using standard kicbase image (crictl included for x86_64/aarch64/s390x)" -fi - -# Start the minikube cluster -start_minikube_cluster - -# Update kubectl context to point to the running cluster -echo "" -echo ">>> Updating kubectl context for minikube cluster..." -"${PROJECT_DIR:-.}/bin/minikube" update-context -echo "✅ Kubectl context updated successfully" - -echo "Minikube host setup completed successfully for ${ARCH}!" - -# Final status -echo "" -echo "==========================================" -echo "✅ Setup Summary" -echo "==========================================" -echo "Architecture: ${ARCH}" -echo "Container Runtime: podman" -echo "Minikube Driver: podman" -echo "Minikube: Default cluster" -echo "Minikube: ${minikube_version}" -echo "CNI: bridge (default)" -if [[ "${ARCH}" == "ppc64le" ]]; then - echo "Special Config: Custom kicbase image with crictl via local registry" -fi diff --git a/scripts/release/agent/detect_ops_manager_changes.py b/scripts/release/agent/detect_ops_manager_changes.py index b5787c8e3..cf5b70740 100644 --- a/scripts/release/agent/detect_ops_manager_changes.py +++ b/scripts/release/agent/detect_ops_manager_changes.py @@ -60,41 +60,6 @@ def extract_ops_manager_mapping(release_data: Dict) -> Dict: return release_data.get("supportedImages", {}).get("mongodb-agent", {}).get("opsManagerMapping", {}) -def _is_later_agent_version(version1: str, version2: str) -> bool: - """ - Compare two agent versions and return True if version1 is later than version2. 
- Agent versions are in format like "13.37.0.9590-1" or "108.0.12.8846-1" - """ - if not version1 or not version2: - return False - - def split_version(version: str) -> List[int]: - """Split version string into numeric parts, ignoring suffix after '-'""" - parts = [] - version_part = version.split("-")[0] # Remove suffix like "-1" - for part in version_part.split("."): - try: - parts.append(int(part)) - except ValueError: - # If we can't parse a part as int, skip it - continue - return parts - - v1_parts = split_version(version1) - v2_parts = split_version(version2) - - # Compare each part - max_len = max(len(v1_parts), len(v2_parts)) - for i in range(max_len): - v1_part = v1_parts[i] if i < len(v1_parts) else 0 - v2_part = v2_parts[i] if i < len(v2_parts) else 0 - - if v1_part != v2_part: - return v1_part > v2_part - - return False # Versions are equal - - def get_changed_agents(current_mapping: Dict, base_mapping: Dict) -> List[Tuple[str, str]]: """Returns list of (agent_version, tools_version) tuples for added/changed agents""" added_agents = [] diff --git a/scripts/release/tests/release_info_test.py b/scripts/release/tests/release_info_test.py index e2ed457f2..add0c2dd4 100644 --- a/scripts/release/tests/release_info_test.py +++ b/scripts/release/tests/release_info_test.py @@ -40,12 +40,12 @@ def test_create_release_info_json( }, "readiness-probe": { "repositories": ["quay.io/mongodb/mongodb-kubernetes-readinessprobe"], - "platforms": ["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], + "platforms": ["linux/arm64", "linux/amd64"], "version": readinessprobe_version, }, "upgrade-hook": { "repositories": ["quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook"], - "platforms": ["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], + "platforms": ["linux/arm64", "linux/amd64"], "version": operator_version_upgrade_post_start_hook_version, }, }, From 59ee174b07e2ca9779c88e060b5e398ce2d1730a Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Tue, 26 Aug 2025 15:56:06 +0200 Subject: [PATCH 157/164] Cleanup --- build_info.json | 2 +- generate_ssdlc_report.py | 2 +- scripts/dev/contexts/evg-private-context | 2 +- scripts/dev/contexts/local-defaults-context | 4 ++-- scripts/evergreen/periodic-cleanup-aws.py | 1 + scripts/funcs/operator_deployment | 1 + scripts/release/tests/build_info_test.py | 2 +- 7 files changed, 8 insertions(+), 6 deletions(-) diff --git a/build_info.json b/build_info.json index e77ffe414..bc36931cc 100644 --- a/build_info.json +++ b/build_info.json @@ -250,7 +250,7 @@ }, "staging": { "sign": true, - "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-agent-ubi"], + "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-agent"], "platforms": [ "linux/arm64", "linux/amd64", diff --git a/generate_ssdlc_report.py b/generate_ssdlc_report.py index ed3b60696..9954f2f08 100755 --- a/generate_ssdlc_report.py +++ b/generate_ssdlc_report.py @@ -97,7 +97,7 @@ def get_supported_images(release: Dict) -> dict[str, SupportedImage]: supported_images = convert_to_image_names(supported_images) supported_images["mongodb-agent-ubi"] = SupportedImage( get_supported_version_for_image("mongodb-agent"), - "mongodb-agent-ubi", + "mongodb-agent", "quay.io/mongodb/mongodb-agent", release["supportedImages"]["mongodb-agent"]["ssdlc_name"], list(), diff --git a/scripts/dev/contexts/evg-private-context b/scripts/dev/contexts/evg-private-context index 8f25842e8..c8bab1176 100644 --- 
a/scripts/dev/contexts/evg-private-context +++ b/scripts/dev/contexts/evg-private-context @@ -47,7 +47,7 @@ export OPS_MANAGER_REGISTRY=${QUAY_REGISTRY} export APPDB_REGISTRY=${QUAY_REGISTRY} export MONGODB_ENTERPRISE_DATABASE_IMAGE="${INIT_IMAGES_REGISTRY}/mongodb-kubernetes-database" export INIT_DATABASE_IMAGE_REPOSITORY="${INIT_IMAGES_REGISTRY}/mongodb-kubernetes-init-database" -export MDB_AGENT_IMAGE_REPOSITORY="${INIT_IMAGES_REGISTRY}/mongodb-agent-ubi" +export MDB_AGENT_IMAGE_REPOSITORY="${INIT_IMAGES_REGISTRY}/mongodb-agent" # these are needed to deploy OM export INIT_APPDB_IMAGE_REPOSITORY="${INIT_IMAGES_REGISTRY}/mongodb-kubernetes-init-appdb" diff --git a/scripts/dev/contexts/local-defaults-context b/scripts/dev/contexts/local-defaults-context index 7f05709d4..b876314b7 100644 --- a/scripts/dev/contexts/local-defaults-context +++ b/scripts/dev/contexts/local-defaults-context @@ -39,9 +39,9 @@ export MONGODB_REPO_URL="${QUAY_REGISTRY}" export APPDB_REGISTRY="${QUAY_REGISTRY}" export MONGODB_ENTERPRISE_DATABASE_IMAGE="${INIT_IMAGES_REGISTRY}/mongodb-kubernetes-database" export MDB_AGENT_IMAGE_OPERATOR_VERSION=latest -export MDB_AGENT_IMAGE_REPOSITORY="${BASE_REPO_URL_SHARED}/mongodb-agent-ubi" +export MDB_AGENT_IMAGE_REPOSITORY="${BASE_REPO_URL_SHARED}/mongodb-agent" export AGENT_BASE_REGISTRY=${BASE_REPO_URL_SHARED} -export AGENT_IMAGE="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent:12.0.30.7791-1" +export AGENT_IMAGE="${MDB_AGENT_IMAGE_REPOSITORY}:12.0.35.7911-1" # these are needed to deploy OM export INIT_APPDB_IMAGE_REPOSITORY="${INIT_IMAGES_REGISTRY}/mongodb-kubernetes-init-appdb" diff --git a/scripts/evergreen/periodic-cleanup-aws.py b/scripts/evergreen/periodic-cleanup-aws.py index 3f8ca0799..cdeec9603 100755 --- a/scripts/evergreen/periodic-cleanup-aws.py +++ b/scripts/evergreen/periodic-cleanup-aws.py @@ -7,6 +7,7 @@ REPOSITORIES_NAMES = [ "dev/mongodb-agent-ubi", + "dev/mongodb-agent", "dev/mongodb-kubernetes-init-appdb", "dev/mongodb-kubernetes-database", "dev/mongodb-kubernetes-init-database", diff --git a/scripts/funcs/operator_deployment b/scripts/funcs/operator_deployment index 36c1f4764..f13b31f23 100644 --- a/scripts/funcs/operator_deployment +++ b/scripts/funcs/operator_deployment @@ -34,6 +34,7 @@ get_operator_helm_values() { "operator.telemetry.send.enabled=${MDB_OPERATOR_TELEMETRY_SEND_ENABLED:-false}" # lets collect and save in the configmap as frequently as we can "operator.telemetry.collection.frequency=${MDB_OPERATOR_TELEMETRY_COLLECTION_FREQUENCY:-1m}" + "community.registry.agent=${AGENT_BASE_REGISTRY:-${REGISTRY}}" ) if [[ "${MDB_OPERATOR_TELEMETRY_INSTALL_CLUSTER_ROLE_INSTALLATION:-}" != "" ]]; then diff --git a/scripts/release/tests/build_info_test.py b/scripts/release/tests/build_info_test.py index ade30b545..fcf0cb111 100644 --- a/scripts/release/tests/build_info_test.py +++ b/scripts/release/tests/build_info_test.py @@ -323,7 +323,7 @@ def test_load_build_info_staging(git_repo: Repo): sign=True, ), "agent": ImageInfo( - repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-agent-ubi"], + repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-agent"], platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=expected_commit_sha, dockerfile_path="docker/mongodb-agent/Dockerfile.atomic", From 703b56ec21444777b2393411c91eab0a373a8d3d Mon Sep 17 00:00:00 2001 From: Lucian Tosa Date: Tue, 26 Aug 2025 16:51:46 +0200 Subject: [PATCH 158/164] Fix unit test --- 
scripts/release/tests/build_info_test.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/release/tests/build_info_test.py b/scripts/release/tests/build_info_test.py index fcf0cb111..e3c341da7 100644 --- a/scripts/release/tests/build_info_test.py +++ b/scripts/release/tests/build_info_test.py @@ -308,7 +308,7 @@ def test_load_build_info_staging(git_repo: Repo): ), "readiness-probe": ImageInfo( repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-readinessprobe"], - platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], + platforms=["linux/arm64", "linux/amd64"], version=expected_commit_sha, dockerfile_path="docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic", sign=True, @@ -317,7 +317,7 @@ def test_load_build_info_staging(git_repo: Repo): repositories=[ "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-operator-version-upgrade-post-start-hook" ], - platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], + platforms=["linux/arm64", "linux/amd64"], version=expected_commit_sha, dockerfile_path="docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic", sign=True, @@ -404,14 +404,14 @@ def test_load_build_info_release( ), "readiness-probe": ImageInfo( repositories=["quay.io/mongodb/mongodb-kubernetes-readinessprobe"], - platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], + platforms=["linux/arm64", "linux/amd64"], version=readinessprobe_version, dockerfile_path="docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic", sign=True, ), "upgrade-hook": ImageInfo( repositories=["quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook"], - platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], + platforms=["linux/arm64", "linux/amd64"], version=operator_version_upgrade_post_start_hook_version, dockerfile_path="docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic", sign=True, From 80857b4d4b922959c2fcaf0d8624aa565f354313 Mon Sep 17 00:00:00 2001 From: Nam Nguyen Date: Tue, 26 Aug 2025 19:44:01 +0200 Subject: [PATCH 159/164] add release notes --- changelog/20250826_feature_migrate_agent_repo.md | 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 changelog/20250826_feature_migrate_agent_repo.md diff --git a/changelog/20250826_feature_migrate_agent_repo.md b/changelog/20250826_feature_migrate_agent_repo.md new file mode 100644 index 000000000..99ff8e432 --- /dev/null +++ b/changelog/20250826_feature_migrate_agent_repo.md @@ -0,0 +1,11 @@ +--- +title: migrate agent repo +kind: feature +date: 2025-08-26 +--- + +* we've migrated the agents to a new repository: `quay.io/mongodb/mongodb-agent`. + * the agents in the new repository will support x86-64, ARM64, s390x, and ppc64le. + * operator running >=MCK1.3.0 and static cannot use the agents at `quay.io/mongodb/mongodb-agent-ubi`. +* `quay.io/mongodb/mongodb-agent-ubi` should not be used anymore, it's only there for backwards compatibility. 
+* More can be read in the [public docs](https://www.mongodb.com/docs/kubernetes/upcoming/tutorial/plan-k8s-op-compatibility/#supported-hardware-architectures)

From 1d7b447d2231d06ab783701b88a0652f105268f2 Mon Sep 17 00:00:00 2001
From: Lucian Tosa
Date: Wed, 27 Aug 2025 16:48:07 +0200
Subject: [PATCH 160/164] Update RN

---
 changelog/20250826_feature_migrate_agent_repo.md | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/changelog/20250826_feature_migrate_agent_repo.md b/changelog/20250826_feature_migrate_agent_repo.md
index 99ff8e432..404a86996 100644
--- a/changelog/20250826_feature_migrate_agent_repo.md
+++ b/changelog/20250826_feature_migrate_agent_repo.md
@@ -4,8 +4,8 @@ kind: feature
 date: 2025-08-26
 ---
 
-* we've migrated the agents to a new repository: `quay.io/mongodb/mongodb-agent`.
-  * the agents in the new repository will support x86-64, ARM64, s390x, and ppc64le.
-  * operator running >=MCK1.3.0 and static cannot use the agents at `quay.io/mongodb/mongodb-agent-ubi`.
+* MongoDB Agent images have been migrated to new container repository: `quay.io/mongodb/mongodb-agent`.
+  * the agents in the new repository will support the x86-64, ARM64, s390x, and ppc64le architectures.
+  * operator running >=MCK1.3.0 and static cannot use the agent images from the old container repository `quay.io/mongodb/mongodb-agent-ubi`.
 * `quay.io/mongodb/mongodb-agent-ubi` should not be used anymore, it's only there for backwards compatibility.
-* More can be read in the [public docs](https://www.mongodb.com/docs/kubernetes/upcoming/tutorial/plan-k8s-op-compatibility/#supported-hardware-architectures)
+* More can be read in the [public docs](https://www.mongodb.com/docs/kubernetes/upcoming/tutorial/plan-k8s-op-container-images/)

From 5eb1b1551d518365e18960a5c8babeaa57712e9c Mon Sep 17 00:00:00 2001
From: Lucian Tosa <49226451+lucian-tosa@users.noreply.github.com>
Date: Thu, 28 Aug 2025 10:19:05 +0200
Subject: [PATCH 161/164] Fix tools in agent images (#376)

# Summary
This PR fixes an issue with the new agent Dockerfiles where the db tools
were not placed correctly. The binaries should be present in `/tools`.
However, our tests were still passing. For that reason, the agent
launcher script will now fail if the binaries are missing.

## Proof of Work
The tests should fail if the binaries are missing. Example:
https://spruce.mongodb.com/version/68af05cace35b000077fae05/tasks?sorts=STATUS%3AASC%3BBASE_STATUS%3ADESC
The pipeline should pass.

## Checklist
- [ ] Have you linked a jira ticket and/or is the ticket in the title?
- [ ] Have you checked whether your jira ticket required DOCSP changes?
- [ ] Have you added changelog file?
  - use `skip-changelog` label if not needed
  - refer to [Changelog files and Release Notes](https://github.com/mongodb/mongodb-kubernetes/blob/master/CONTRIBUTING.md#changelog-files-and-release-notes) section in CONTRIBUTING.md for more details
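As an additional manual check (not part of the patch itself; the tag
`mongodb-agent:local` below is a placeholder for whatever tag you built),
the expected binaries can be verified directly inside the image:

```bash
# Spot-check that the database tools landed directly in /tools.
docker run --rm --entrypoint /bin/sh mongodb-agent:local -c '
    for tool in mongodump mongorestore mongoexport mongoimport; do
        test -x "/tools/${tool}" || { echo "missing: /tools/${tool}"; exit 1; }
        echo "found: /tools/${tool}"
    done'
```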
---
 docker/mongodb-agent/Dockerfile.atomic             | 7 +++++--
 .../content/agent-launcher.sh                      | 8 ++++----
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/docker/mongodb-agent/Dockerfile.atomic b/docker/mongodb-agent/Dockerfile.atomic
index 335407a98..15458c4e4 100644
--- a/docker/mongodb-agent/Dockerfile.atomic
+++ b/docker/mongodb-agent/Dockerfile.atomic
@@ -20,8 +20,11 @@ RUN case ${TARGETPLATFORM} in \
     && mkdir -p /tools \
     && curl -o /tools/mongodb_tools.tgz "${mongodb_tools_url}/${MONGODB_TOOLS_VERSION}"
 
-RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \
-    && rm /tools/mongodb_tools.tgz
+RUN tar xfz /tools/mongodb_tools.tgz \
+    && mv mongodb-database-tools-*/bin/* /tools \
+    && chmod +x /tools/* \
+    && rm /tools/mongodb_tools.tgz \
+    && rm -r mongodb-database-tools-*
 
 FROM --platform=${BUILDPLATFORM} registry.access.redhat.com/ubi9/ubi-minimal AS agent_downloader
 
diff --git a/docker/mongodb-kubernetes-init-database/content/agent-launcher.sh b/docker/mongodb-kubernetes-init-database/content/agent-launcher.sh
index 7bdf8164c..f565a63ea 100755
--- a/docker/mongodb-kubernetes-init-database/content/agent-launcher.sh
+++ b/docker/mongodb-kubernetes-init-database/content/agent-launcher.sh
@@ -206,10 +206,10 @@ else
         ln -sf "${MONGOD_ROOT}/bin/mongod" ${mdb_downloads_dir}/mongod/bin/mongod
         ln -sf "${MONGOD_ROOT}/bin/mongos" ${mdb_downloads_dir}/mongod/bin/mongos
 
-        ln -sf "/tools/mongodump" ${mdb_downloads_dir}/mongod/bin/mongodump
-        ln -sf "/tools/mongorestore" ${mdb_downloads_dir}/mongod/bin/mongorestore
-        ln -sf "/tools/mongoexport" ${mdb_downloads_dir}/mongod/bin/mongoexport
-        ln -sf "/tools/mongoimport" ${mdb_downloads_dir}/mongod/bin/mongoimport
+        for tool in mongoimport mongodump mongorestore mongoexport; do
+            [ -e "/tools/${tool}" ] || { echo "/tools/${tool} not found"; exit 1; }
+            ln -sf "/tools/${tool}" ${mdb_downloads_dir}/mongod/bin/${tool}
+        done
     else
         echo "Mongod PID not found within the specified time."
         exit 1
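A note on the `Dockerfile.atomic` hunk above: the database-tools tarball
nests its binaries under a versioned top-level directory, which is why the
previous `tar ... --directory /tools` left them one level below `/tools`
instead of directly in it. Listing the archive makes the layout visible
(the tarball name and the output shown are illustrative for the
rhel93/x86_64 build):

```bash
# The first entries show the nested directory that the new `mv` flattens.
tar tzf mongodb-database-tools-rhel93-x86_64-100.12.0.tgz | head -n 3
# mongodb-database-tools-rhel93-x86_64-100.12.0/
# mongodb-database-tools-rhel93-x86_64-100.12.0/bin/
# mongodb-database-tools-rhel93-x86_64-100.12.0/bin/mongodump
```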
From 061992c683d768eedbd10cb6af0cdf047db49838 Mon Sep 17 00:00:00 2001
From: Nam Nguyen
Date: Thu, 28 Aug 2025 10:47:47 +0200
Subject: [PATCH 162/164] Apply suggestion from @viveksinghggits

Co-authored-by: Vivek Singh
---
 changelog/20250826_feature_migrate_agent_repo.md | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/changelog/20250826_feature_migrate_agent_repo.md b/changelog/20250826_feature_migrate_agent_repo.md
index 404a86996..df07a9995 100644
--- a/changelog/20250826_feature_migrate_agent_repo.md
+++ b/changelog/20250826_feature_migrate_agent_repo.md
@@ -5,7 +5,6 @@ date: 2025-08-26
 ---
 
 * MongoDB Agent images have been migrated to new container repository: `quay.io/mongodb/mongodb-agent`.
-  * the agents in the new repository will support the x86-64, ARM64, s390x, and ppc64le architectures.
+  * the agents in the new repository will support the x86-64, ARM64, s390x, and ppc64le architectures. More can be read in the [public docs](https://www.mongodb.com/docs/kubernetes/upcoming/tutorial/plan-k8s-op-container-images/).
   * operator running >=MCK1.3.0 and static cannot use the agent images from the old container repository `quay.io/mongodb/mongodb-agent-ubi`.
 * `quay.io/mongodb/mongodb-agent-ubi` should not be used anymore, it's only there for backwards compatibility.
-* More can be read in the [public docs](https://www.mongodb.com/docs/kubernetes/upcoming/tutorial/plan-k8s-op-container-images/)

From 1c9998c986b56b348c7cc640266ec47055bbf321 Mon Sep 17 00:00:00 2001
From: Nam Nguyen
Date: Thu, 28 Aug 2025 10:53:32 +0200
Subject: [PATCH 163/164] fix

---
 build_info.json                                   | 2 +-
 docker/mongodb-kubernetes-database/README.md      | 5 +----
 docker/mongodb-kubernetes-init-appdb/README.md    | 1 -
 docker/mongodb-kubernetes-init-database/README.md | 3 +--
 docker/mongodb-kubernetes-tests/README.md         | 1 -
 5 files changed, 3 insertions(+), 9 deletions(-)

diff --git a/build_info.json b/build_info.json
index bc36931cc..6bb5ddd50 100644
--- a/build_info.json
+++ b/build_info.json
@@ -244,7 +244,7 @@
       "dockerfile-path": "docker/mongodb-agent/Dockerfile.atomic",
       "patch": {
         "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent"],
-      "platforms": [
+        "platforms": [
           "linux/amd64"
         ]
       },
diff --git a/docker/mongodb-kubernetes-database/README.md b/docker/mongodb-kubernetes-database/README.md
index 7dd70a6fa..6264e3d56 100644
--- a/docker/mongodb-kubernetes-database/README.md
+++ b/docker/mongodb-kubernetes-database/README.md
@@ -40,9 +40,6 @@ For building the MongoDB Database image locally use the example command:
 
 ```bash
 VERSION="1.3.0"
-BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/"
-docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . -f docker/mongodb-kubernetes-database/Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes-database:${VERSION}" \
+docker buildx build --load --progress plain . -f docker/mongodb-kubernetes-database/Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes-database:${VERSION}" \
     --build-arg VERSION="${VERSION}"
-
-docker push "${BASE_REPO_URL}mongodb-kubernetes-database:${VERSION}"
 ```
diff --git a/docker/mongodb-kubernetes-init-appdb/README.md b/docker/mongodb-kubernetes-init-appdb/README.md
index f3d51eb1c..9440e8e90 100644
--- a/docker/mongodb-kubernetes-init-appdb/README.md
+++ b/docker/mongodb-kubernetes-init-appdb/README.md
@@ -5,7 +5,6 @@ For building the MongoDB Init AppDB image locally use the example command:
 ```bash
 VERSION="1.3.0"
 MONGODB_TOOLS_URL="https://downloads.mongodb.org/tools/db"
-BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/"
 docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . -f docker/mongodb-kubernetes-init-appdb/Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes-init-appdb:${VERSION}" \
     --build-arg version="${VERSION}" \
     --build-arg mongodb_tools_url="${MONGODB_TOOLS_URL_UBI}" \
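An editorial aside on the README commands in this patch: the
multi-platform `docker buildx build --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le`
invocations assume a buildx builder that can emulate the non-native
architectures. A minimal one-time setup sketch (the builder name
`multiarch` is arbitrary):

```bash
# Register QEMU binfmt handlers so cross-architecture builds can run.
docker run --privileged --rm tonistiigi/binfmt --install all

# Create a docker-container builder (required for multi-platform output)
# and make it the default for subsequent buildx invocations.
docker buildx create --name multiarch --driver docker-container --use
docker buildx inspect --bootstrap
```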
diff --git a/docker/mongodb-kubernetes-init-database/README.md b/docker/mongodb-kubernetes-init-database/README.md
index 04571f284..2ee22ed21 100644
--- a/docker/mongodb-kubernetes-init-database/README.md
+++ b/docker/mongodb-kubernetes-init-database/README.md
@@ -3,10 +3,9 @@
 For building the MongoDB Init AppDB image locally use the example command:
 
 ```bash
-VERSION="evergreen"
+VERSION="1.3.0"
 TOOLS_VERSION="100.12.0"
 MONGODB_TOOLS_URL_UBI="https://downloads.mongodb.org/tools/db"
-BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/"
 docker buildx build --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . -f docker/mongodb-kubernetes-init-database/Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes-init-database:${VERSION}" \
     --build-arg version="${VERSION}" \
     --build-arg mongodb_tools_url="${MONGODB_TOOLS_URL_UBI}" \
diff --git a/docker/mongodb-kubernetes-tests/README.md b/docker/mongodb-kubernetes-tests/README.md
index e09b77a0e..736434b59 100644
--- a/docker/mongodb-kubernetes-tests/README.md
+++ b/docker/mongodb-kubernetes-tests/README.md
@@ -111,7 +111,6 @@ to call a particular E2E task we are interested in.
 
 ```bash
 make prepare-local-e2e
 cd docker/mongodb-kubernetes-tests
-BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/"
 docker buildx build --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . -f Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes-tests:evergreen" \
     --build-arg PYTHON_VERSION="3.13"
 
From 6afdada9052495c85350ccbc0c79f66a562ce4ff Mon Sep 17 00:00:00 2001
From: Nam Nguyen
Date: Thu, 28 Aug 2025 11:01:47 +0200
Subject: [PATCH 164/164] fix 2 removing lucian

---
 docker/mongodb-agent/README.md               | 3 +--
 docker/mongodb-kubernetes-operator/README.md | 3 +--
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/docker/mongodb-agent/README.md b/docker/mongodb-agent/README.md
index f8ec6ea20..75f65c47a 100644
--- a/docker/mongodb-agent/README.md
+++ b/docker/mongodb-agent/README.md
@@ -8,12 +8,11 @@ binaries from there. Then we continue with the other steps to fully build the im
 For building the MongoDB Agent image locally use the example command:
 
 ```bash
-VERSION="evergreen"
+VERSION="1.3.0"
 AGENT_VERSION="108.0.7.8810-1"
 TOOLS_VERSION="100.12.0"
 MONGODB_TOOLS_URL="https://downloads.mongodb.org/tools/db"
 MONGODB_AGENT_URL="https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod"
-BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/"
 INIT_DATABASE_IMAGE="${BASE_REPO_URL}mongodb-kubernetes-init-database:${VERSION}"
 MONGODB_AGENT_BASE="mongodb-mms-automation-agent"
 MONGODB_DATABASE_TOOLS_BASE="mongodb-database-tools"
diff --git a/docker/mongodb-kubernetes-operator/README.md b/docker/mongodb-kubernetes-operator/README.md
index 546ed893c..8aef617ad 100644
--- a/docker/mongodb-kubernetes-operator/README.md
+++ b/docker/mongodb-kubernetes-operator/README.md
@@ -13,10 +13,9 @@ CGO_ENABLED=0 GOOS=linux GOFLAGS="-mod=vendor" go build -i -o mongodb-kubernetes
 For building the MongoDB Init Ops Manager image locally use the example command:
 
 ```bash
-VERSION="evergreen"
+VERSION="1.3.0"
 LOG_AUTOMATION_CONFIG_DIFF="false"
 USE_RACE="false"
-BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/"
 docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . -f docker/mongodb-kubernetes-operator/Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes:${VERSION}" \
     --build-arg version="${VERSION}" \
     --build-arg log_automation_config_diff="${LOG_AUTOMATION_CONFIG_DIFF}" \