diff --git a/.evergreen-functions.yml b/.evergreen-functions.yml index 187e170db..ed03ee798 100644 --- a/.evergreen-functions.yml +++ b/.evergreen-functions.yml @@ -51,10 +51,22 @@ functions: ### Setup Functions ### + setup_jq: &setup_jq + command: subprocess.exec + type: setup + params: + <<: *e2e_include_expansions_in_env + add_to_path: + - ${workdir}/bin + working_dir: src/github.com/mongodb/mongodb-kubernetes + binary: scripts/evergreen/setup_jq.sh + setup_context: &setup_context # Running the first switch is important to fill the workdir and other important initial env vars command: shell.exec type: setup params: + add_to_path: + - ${workdir}/bin shell: bash working_dir: src/github.com/mongodb/mongodb-kubernetes <<: *e2e_include_expansions_in_env @@ -103,6 +115,7 @@ functions: type: setup params: command: "git config --global user.email 'kubernetes-hosted-team@mongodb.com'" + - *setup_jq # we need jq in the context - *setup_context setup_kubectl: &setup_kubectl @@ -112,13 +125,6 @@ functions: working_dir: src/github.com/mongodb/mongodb-kubernetes binary: scripts/evergreen/setup_kubectl.sh - setup_jq: &setup_jq - command: subprocess.exec - type: setup - params: - working_dir: src/github.com/mongodb/mongodb-kubernetes - binary: scripts/evergreen/setup_jq.sh - setup_shellcheck: command: subprocess.exec type: setup @@ -225,7 +231,7 @@ functions: working_dir: src/github.com/mongodb/mongodb-kubernetes add_to_path: - ${workdir}/bin - binary: scripts/dev/configure_docker_auth.sh + binary: scripts/dev/configure_container_auth.sh setup_evg_host: &setup_evg_host command: subprocess.exec @@ -261,11 +267,30 @@ functions: - *setup_evg_host - *python_venv - prune_docker_resources: + # This differs for normal evg_host as we require minikube instead of kind for + # IBM machines and install aws cli via pip instead + setup_building_host_minikube: + - *switch_context - command: subprocess.exec type: setup params: - command: "docker system prune -a -f" + working_dir: 
src/github.com/mongodb/mongodb-kubernetes + add_to_path: + - ${workdir}/bin + command: scripts/evergreen/setup_minikube_host.sh + + prune_docker_resources: + - command: shell.exec + type: setup + params: + shell: bash + script: | + if command -v docker >/dev/null 2>&1; then + echo "Docker found, pruning docker resources..." + docker system prune -a -f + else + echo "Docker not found, skipping docker resource pruning" + fi # the task configures the set of tools necessary for any task working with K8 cluster: # installs kubectl, jq, kind (if necessary), configures docker authentication @@ -327,8 +352,7 @@ functions: shell: bash working_dir: src/github.com/mongodb/mongodb-kubernetes script: | - source .generated/context.export.env - scripts/evergreen/e2e/setup_cloud_qa.py create + scripts/evergreen/run_python.sh scripts/evergreen/e2e/setup_cloud_qa.py create # The additional switch is needed, since we now have created the needed OM exports. - *switch_context @@ -339,8 +363,7 @@ functions: shell: bash working_dir: src/github.com/mongodb/mongodb-kubernetes script: | - source .generated/context.export.env - scripts/evergreen/e2e/setup_cloud_qa.py delete + scripts/evergreen/run_python.sh scripts/evergreen/e2e/setup_cloud_qa.py delete dump_diagnostic_information_from_all_namespaces: - command: subprocess.exec @@ -504,6 +527,18 @@ functions: - ${workdir}/bin - ${workdir} + build_test_image_ibm: + - *switch_context + - command: subprocess.exec + params: + shell: bash + working_dir: src/github.com/mongodb/mongodb-kubernetes + include_expansions_in_env: + - version_id + add_to_path: + - ${workdir}/bin + binary: scripts/evergreen/e2e/build_e2e_image_ibm.sh + pipeline: - *switch_context - command: shell.exec diff --git a/.evergreen.yml b/.evergreen.yml index f81d14036..e2ba0aaa9 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -77,6 +77,13 @@ variables: - func: download_kube_tools - func: setup_building_host + - &setup_group_ibm + setup_group_can_fail_task: true + 
setup_group: + - func: clone + - func: setup_building_host_minikube + - func: build_multi_cluster_binary + - &setup_group_multi_cluster setup_group_can_fail_task: true setup_group: @@ -423,7 +430,15 @@ tasks: - func: build_multi_cluster_binary - func: pipeline vars: - image_name: test + image_name: meko-tests + + + - name: build_test_image_ibm + commands: + - func: clone + - func: setup_building_host + - func: build_multi_cluster_binary + - func: build_test_image_ibm - name: build_mco_test_image commands: @@ -431,7 +446,7 @@ tasks: - func: setup_building_host - func: pipeline vars: - image_name: mco-test + image_name: mco-tests - name: build_operator_ubi commands: @@ -1185,6 +1200,22 @@ task_groups: - e2e_om_ops_manager_backup <<: *teardown_group + - name: e2e_smoke_arm_task_group + max_hosts: -1 + <<: *setup_group + <<: *setup_and_teardown_task_cloudqa + tasks: + - e2e_replica_set + <<: *teardown_group + + - name: e2e_smoke_ibm_task_group + max_hosts: -1 + <<: *setup_group_ibm + <<: *setup_and_teardown_task_cloudqa + tasks: + - e2e_replica_set + <<: *teardown_group + - name: e2e_ops_manager_kind_5_0_only_task_group max_hosts: -1 <<: *setup_group @@ -1450,6 +1481,64 @@ buildvariants: tasks: - name: e2e_smoke_task_group + - name: e2e_smoke_ibm_power + display_name: e2e_smoke_ibm_power + tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] + run_on: + - rhel9-power-large + allowed_requesters: [ "patch", "github_tag" ] + depends_on: + - name: build_operator_ubi + variant: init_test_run + - name: build_init_database_image_ubi + variant: init_test_run + - name: build_database_image_ubi + variant: init_test_run + - name: build_init_appdb_images_ubi + variant: init_test_run + - name: build_init_om_images_ubi + variant: init_test_run + - name: build_agent_images_ubi + variant: init_test_run + - name: build_test_image_ibm + variant: init_test_run_ibm + tasks: + - name: e2e_smoke_ibm_task_group + + - name: e2e_smoke_ibm_z + display_name: e2e_smoke_ibm_z + tags: [ 
"e2e_test_suite", "e2e_smoke_release_test_suite" ] + run_on: + - rhel9-zseries-small + allowed_requesters: [ "patch", "github_tag" ] + depends_on: + - name: build_operator_ubi + variant: init_test_run + - name: build_init_database_image_ubi + variant: init_test_run + - name: build_database_image_ubi + variant: init_test_run + - name: build_init_appdb_images_ubi + variant: init_test_run + - name: build_init_om_images_ubi + variant: init_test_run + - name: build_agent_images_ubi + variant: init_test_run + - name: build_test_image_ibm + variant: init_test_run_ibm + tasks: + - name: e2e_smoke_ibm_task_group + + - name: e2e_smoke_arm + display_name: e2e_smoke_arm + tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] + run_on: + - ubuntu2204-arm64-large + allowed_requesters: [ "patch", "github_tag" ] + <<: *base_no_om_image_dependency + tasks: + - name: e2e_smoke_arm_task_group + - name: e2e_static_smoke display_name: e2e_static_smoke tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] @@ -1664,6 +1753,14 @@ buildvariants: - name: build_upgrade_hook_image - name: prepare_aws + - name: init_test_run_ibm + display_name: init_test_run_ibm + max_hosts: -1 + run_on: + - ubuntu2204-small + tasks: + - name: build_test_image_ibm + - name: init_release_agents_on_ecr display_name: init_release_agents_on_ecr # this enables us to run this variant either manually (patch) which pct does or during an OM bump (github_pr) diff --git a/Makefile b/Makefile index 728721da5..8a6b5632a 100644 --- a/Makefile +++ b/Makefile @@ -147,7 +147,7 @@ ac: # in parallel and both call 'aws_login') then Docker login may return an error "Error saving credentials:..The # specified item already exists in the keychain". 
Seems this allows to ignore the error aws_login: - @ scripts/dev/configure_docker_auth.sh + @ scripts/dev/configure_container_auth.sh # cleans up aws resources, including s3 buckets which are older than 5 hours aws_cleanup: diff --git a/README.md b/README.md index e230cf351..1f5a59a10 100644 --- a/README.md +++ b/README.md @@ -57,4 +57,3 @@ Migration from [MongoDB Community Operator](https://github.com/mongodb/mongodb-k See our detailed migration guides: - [Migrating from MongoDB Community Operator](docs/migration/community-operator-migration.md) - [Migrating from MongoDB Enterprise Kubernetes Operator](https://www.mongodb.com/docs/kubernetes/current/tutorial/migrate-to-mck/) - diff --git a/build_info.json b/build_info.json index 10935b7ef..1db22b5fc 100644 --- a/build_info.json +++ b/build_info.json @@ -1,90 +1,126 @@ { "images": { - "mongodbOperator": { + "operator": { "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes", "platforms": [ - "linux/amd64" + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { - "repository": "quay.io/mongodb/mongodb-kubernetes-stg", + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "release": { + "sign": true, "repository": "quay.io/mongodb/mongodb-kubernetes", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] } }, - "initDatabase": { + "init-database": { "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-database", "platforms": [ - "linux/amd64" + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { - "repository": "quay.io/mongodb/mongodb-kubernetes-init-database-stg", + "sign": true, + "repository": 
"268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-database", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "release": { + "sign": true, "repository": "quay.io/mongodb/mongodb-kubernetes-init-database", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] } }, - "initAppDb": { + "init-appdb": { "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-appdb", "platforms": [ - "linux/amd64" + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { - "repository": "quay.io/mongodb/mongodb-kubernetes-init-appdb-stg", + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-appdb", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "release": { + "sign": true, "repository": "quay.io/mongodb/mongodb-kubernetes-init-appdb", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] } }, - "initOpsManager": { + "init-ops-manager": { "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-ops-manager", "platforms": [ - "linux/amd64" + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { - "repository": "quay.io/mongodb/mongodb-kubernetes-init-ops-manager-stg", + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-ops-manager", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "release": { + "sign": true, "repository": "quay.io/mongodb/mongodb-kubernetes-init-ops-manager", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] } }, @@ -92,25 +128,68 @@ "patch": { "repository": 
"268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-database", "platforms": [ - "linux/amd64" + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "staging": { - "repository": "quay.io/mongodb/mongodb-kubernetes-database-stg", + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-database", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "release": { + "sign": true, "repository": "quay.io/mongodb/mongodb-kubernetes-database", "platforms": [ "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" + ] + } + }, + "meko-tests": { + "patch": { + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-tests", + "platforms": [ + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" + ] + }, + "staging": { + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-tests", + "platforms": [ + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" + ] + } + }, + "mco-tests": { + "patch": { + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-community-tests", + "platforms": [ + "linux/amd64" + ] + }, + "staging": { + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-community-tests", + "platforms": [ "linux/amd64" ] } }, - "readinessprobe": { + "readiness-probe": { "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-readinessprobe", "platforms": [ @@ -118,22 +197,28 @@ ] }, "staging": { - "repository": "quay.io/mongodb/mongodb-kubernetes-readinessprobe-stg", + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-readinessprobe", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "release": { "version": "1.0.22", + "sign": true, "repository": 
"quay.io/mongodb/mongodb-kubernetes-readinessprobe", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] } }, - "operator-version-upgrade-post-start-hook": { + "upgrade-hook": { "patch": { "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-operator-version-upgrade-post-start-hook", "platforms": [ @@ -141,17 +226,76 @@ ] }, "staging": { - "repository": "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook-stg", + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-operator-version-upgrade-post-start-hook", "platforms": [ "linux/arm64", - "linux/amd64" + "linux/amd64", + "linux/s390x", + "linux/ppc64le" ] }, "release": { "version": "1.0.9", + "sign": true, "repository": "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook", "platforms": [ "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" + ] + } + }, + "agent": { + "patch": { + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent-ubi", + "platforms": [ + "linux/amd64" + ] + }, + "staging": { + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-agent-ubi", + "platforms": [ + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" + ] + }, + "release": { + "sign": true, + "repository": "quay.io/mongodb/mongodb-agent-ubi", + "platforms": [ + "linux/arm64", + "linux/amd64", + "linux/s390x", + "linux/ppc64le" + ] + } + }, + "ops-manager": { + "patch": { + "version": "om-version-from-release.json", + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-enterprise-ops-manager", + "platforms": [ + "linux/amd64" + ] + }, + "staging": { + "version": "om-version-from-release.json", + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-enterprise-ops-manager", + "platforms": [ + "linux/amd64" + ] + }, + 
"release": { + "version": "om-version-from-release.json", + "sign": true, + "repository": "quay.io/mongodb/mongodb-enterprise-ops-manager", + "platforms": [ "linux/amd64" ] } @@ -166,6 +310,7 @@ ] }, "staging": { + "sign": true, "s3-store": "s3://kubectl-mongodb/staging", "platforms": [ "darwin/amd64", @@ -175,6 +320,7 @@ ] }, "release": { + "sign": true, "s3-store": "s3://kubectl-mongodb/prod", "platforms": [ "darwin/amd64", @@ -191,9 +337,11 @@ "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/helm-charts" }, "staging": { - "repository": "quay.io/mongodb/helm-charts-stg" + "sign": true, + "repository": "268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/helm-charts" }, "release": { + "sign": true, "repository": "quay.io/mongodb/helm-charts" } } diff --git a/build_info_agent.json b/build_info_agent.json new file mode 100644 index 000000000..a74f96abc --- /dev/null +++ b/build_info_agent.json @@ -0,0 +1,24 @@ +{ + "platform_mappings": { + "linux/amd64": { + "agent_suffix": "linux_x86_64.tar.gz", + "tools_suffix": "rhel88-x86_64-{TOOLS_VERSION}.tgz" + }, + "linux/arm64": { + "agent_suffix": "amzn2_aarch64.tar.gz", + "tools_suffix": "rhel88-aarch64-{TOOLS_VERSION}.tgz" + }, + "linux/s390x": { + "agent_suffix": "rhel7_s390x.tar.gz", + "tools_suffix": "rhel9-s390x-{TOOLS_VERSION}.tgz" + }, + "linux/ppc64le": { + "agent_suffix": "rhel8_ppc64le.tar.gz", + "tools_suffix": "rhel9-ppc64le-{TOOLS_VERSION}.tgz" + } + }, + "base_names": { + "agent": "mongodb-mms-automation-agent", + "tools": "mongodb-database-tools" + } +} diff --git a/docker/mongodb-agent-non-matrix/README.md b/docker/mongodb-agent-non-matrix/README.md new file mode 100644 index 000000000..c50d889c4 --- /dev/null +++ b/docker/mongodb-agent-non-matrix/README.md @@ -0,0 +1,27 @@ +### Building locally + +For building the MongoDB Agent (non-static) image locally use the example command: + +TODO: What to do with label quay.expires-after=48h? 
+```bash +AGENT_VERSION="108.0.7.8810-1" +TOOLS_VERSION="100.12.0" +MONGODB_TOOLS_URL="https://downloads.mongodb.org/tools/db" +MONGODB_AGENT_URL="https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" +BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/" + +docker buildx build --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . -f docker/mongodb-agent-non-matrix/Dockerfile -t "${BASE_REPO_URL}mongodb-agent:${AGENT_VERSION}" \ + --build-arg version="${AGENT_VERSION}" \ + --build-arg mongodb_tools_url="${MONGODB_TOOLS_URL}" \ + --build-arg mongodb_agent_url="${MONGODB_AGENT_URL}" \ + --build-arg mongodb_agent_version_s390x="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel7_s390x.tar.gz" \ + --build-arg mongodb_agent_version_ppc64le="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel8_ppc64le.tar.gz" \ + --build-arg mongodb_agent_version_amd64="mongodb-mms-automation-agent-${AGENT_VERSION}.linux_x86_64.tar.gz" \ + --build-arg mongodb_agent_version_arm64="mongodb-mms-automation-agent-${AGENT_VERSION}.amzn2_aarch64.tar.gz" \ + --build-arg mongodb_tools_version_arm64="mongodb-database-tools-rhel93-aarch64-${TOOLS_VERSION}.tgz" \ + --build-arg mongodb_tools_version_amd64="mongodb-database-tools-rhel93-x86_64-${TOOLS_VERSION}.tgz" \ + --build-arg mongodb_tools_version_s390x="mongodb-database-tools-rhel9-s390x-${TOOLS_VERSION}.tgz" \ + --build-arg mongodb_tools_version_ppc64le="mongodb-database-tools-rhel9-ppc64le-${TOOLS_VERSION}.tgz" + +docker push "${BASE_REPO_URL}mongodb-agent:${AGENT_VERSION}" +``` diff --git a/docker/mongodb-agent/Dockerfile.atomic b/docker/mongodb-agent/Dockerfile.atomic index cd5eccf08..de9b32121 100644 --- a/docker/mongodb-agent/Dockerfile.atomic +++ b/docker/mongodb-agent/Dockerfile.atomic @@ -1,31 +1,82 @@ -FROM scratch AS base +FROM alpine:latest AS tools_downloader -ARG agent_version -ARG agent_distro -ARG tools_version -ARG tools_distro +ARG 
mongodb_tools_url -ADD https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-${agent_version}.${agent_distro}.tar.gz /data/mongodb-agent.tar.gz -ADD https://downloads.mongodb.org/tools/db/mongodb-database-tools-${tools_distro}-${tools_version}.tgz /data/mongodb-tools.tgz +# Set default empty values for all platforms +ARG mongodb_tools_version_amd64="" +ARG mongodb_tools_version_arm64="" +ARG mongodb_tools_version_s390x="" +ARG mongodb_tools_version_ppc64le="" -COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/LICENSE -COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/LICENSE -COPY ./docker/mongodb-agent/agent-launcher-shim.sh /opt/scripts/agent-launcher-shim.sh -COPY ./docker/mongodb-agent/setup-agent-files.sh /opt/scripts/setup-agent-files.sh -COPY ./docker/mongodb-agent/dummy-probe.sh /opt/scripts/dummy-probe.sh -COPY ./docker/mongodb-agent/dummy-readinessprobe.sh /opt/scripts/dummy-readinessprobe.sh +# Create directories +RUN mkdir -p /data/amd64 /data/arm64 /data/s390x /data/ppc64le + +# Conditionally download only if the argument is provided +RUN if [ -n "$mongodb_tools_version_amd64" ]; then \ + wget -O /data/amd64/mongodb_tools.tgz "${mongodb_tools_url}/${mongodb_tools_version_amd64}"; \ + fi + +RUN if [ -n "$mongodb_tools_version_arm64" ]; then \ + wget -O /data/arm64/mongodb_tools.tgz "${mongodb_tools_url}/${mongodb_tools_version_arm64}"; \ + fi + +RUN if [ -n "$mongodb_tools_version_s390x" ]; then \ + wget -O /data/s390x/mongodb_tools.tgz "${mongodb_tools_url}/${mongodb_tools_version_s390x}"; \ + fi + +RUN if [ -n "$mongodb_tools_version_ppc64le" ]; then \ + wget -O /data/ppc64le/mongodb_tools.tgz "${mongodb_tools_url}/${mongodb_tools_version_ppc64le}"; \ + fi + +FROM alpine:latest AS agent_downloader + +ARG mongodb_agent_url + +# Set default empty values for all platforms +ARG mongodb_agent_version_amd64="" +ARG 
mongodb_agent_version_arm64="" +ARG mongodb_agent_version_s390x="" +ARG mongodb_agent_version_ppc64le="" + +# Create directories +RUN mkdir -p /data/amd64 /data/arm64 /data/s390x /data/ppc64le + +# Conditionally download only if the argument is provided +RUN if [ -n "$mongodb_agent_version_amd64" ]; then \ + wget -O /data/amd64/mongodb_agent.tgz "${mongodb_agent_url}/${mongodb_agent_version_amd64}"; \ + fi + +RUN if [ -n "$mongodb_agent_version_arm64" ]; then \ + wget -O /data/arm64/mongodb_agent.tgz "${mongodb_agent_url}/${mongodb_agent_version_arm64}"; \ + fi + +RUN if [ -n "$mongodb_agent_version_s390x" ]; then \ + wget -O /data/s390x/mongodb_agent.tgz "${mongodb_agent_url}/${mongodb_agent_version_s390x}"; \ + fi + +RUN if [ -n "$mongodb_agent_version_ppc64le" ]; then \ + wget -O /data/ppc64le/mongodb_agent.tgz "${mongodb_agent_url}/${mongodb_agent_version_ppc64le}"; \ + fi FROM registry.access.redhat.com/ubi9/ubi-minimal -ARG version +ARG TARGETARCH -LABEL name="MongoDB Agent" \ - version="${version}" \ - summary="MongoDB Agent" \ - description="MongoDB Agent" \ - vendor="MongoDB" \ - release="1" \ - maintainer="support@mongodb.com" +# Create directories first +RUN mkdir -p /tools /agent + +# Copy the entire platform directory and handle missing files gracefully +COPY --from=tools_downloader "/data/" /tmp/tools_data/ +COPY --from=agent_downloader "/data/" /tmp/agent_data/ + +# Move files to the correct location if they exist +RUN if [ -f "/tmp/tools_data/${TARGETARCH}/mongodb_tools.tgz" ]; then \ + mv "/tmp/tools_data/${TARGETARCH}/mongodb_tools.tgz" /tools/mongodb_tools.tgz; \ + fi && \ + if [ -f "/tmp/agent_data/${TARGETARCH}/mongodb_agent.tgz" ]; then \ + mv "/tmp/agent_data/${TARGETARCH}/mongodb_agent.tgz" /agent/mongodb_agent.tgz; \ + fi && \ + rm -rf /tmp/tools_data /tmp/agent_data # Replace libcurl-minimal and curl-minimal with the full versions # https://bugzilla.redhat.com/show_bug.cgi?id=1994521 @@ -55,26 +106,39 @@ RUN mkdir -p /agent \ && touch 
/var/log/mongodb-mms-automation/readiness.log \ && chmod ugo+rw /var/log/mongodb-mms-automation/readiness.log - -COPY --from=base /data/mongodb-agent.tar.gz /agent -COPY --from=base /data/mongodb-tools.tgz /agent -COPY --from=base /data/LICENSE /licenses/LICENSE - # Copy scripts to a safe location that won't be overwritten by volume mount -COPY --from=base /opt/scripts/agent-launcher-shim.sh /usr/local/bin/agent-launcher-shim.sh -COPY --from=base /opt/scripts/setup-agent-files.sh /usr/local/bin/setup-agent-files.sh -COPY --from=base /opt/scripts/dummy-probe.sh /usr/local/bin/dummy-probe.sh -COPY --from=base /opt/scripts/dummy-readinessprobe.sh /usr/local/bin/dummy-readinessprobe - -RUN tar xfz /agent/mongodb-agent.tar.gz \ - && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ - && chmod +x /agent/mongodb-agent \ +COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/LICENSE +COPY ./docker/mongodb-agent/agent-launcher-shim.sh /opt/scripts/agent-launcher-shim.sh +COPY ./docker/mongodb-agent/setup-agent-files.sh /opt/scripts/setup-agent-files.sh +COPY ./docker/mongodb-agent/dummy-probe.sh /opt/scripts/dummy-probe.sh +COPY ./docker/mongodb-agent/dummy-readinessprobe.sh /opt/scripts/dummy-readinessprobe.sh + +# Extract agent files if they exist +RUN if [ -f "/agent/mongodb_agent.tgz" ]; then \ + tar xfz /agent/mongodb_agent.tgz \ + && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ + && chmod +x /agent/mongodb-agent \ + && rm /agent/mongodb_agent.tgz \ + && rm -r mongodb-mms-automation-agent-*; \ + fi \ && mkdir -p /var/lib/automation/config \ - && chmod -R +r /var/lib/automation/config \ - && rm /agent/mongodb-agent.tar.gz \ - && rm -r mongodb-mms-automation-agent-* + && chmod -R +r /var/lib/automation/config + +# Extract tools files if they exist +RUN if [ -f "/tools/mongodb_tools.tgz" ]; then \ + tar xfz /tools/mongodb_tools.tgz --directory /var/lib/mongodb-mms-automation/ \ + 
&& rm /tools/mongodb_tools.tgz; \ + fi + +ARG version -RUN tar xfz /agent/mongodb-tools.tgz --directory /var/lib/mongodb-mms-automation/ && rm /agent/mongodb-tools.tgz +LABEL name="MongoDB Agent" \ + version="${version}" \ + summary="MongoDB Agent" \ + description="MongoDB Agent" \ + vendor="MongoDB" \ + release="1" \ + maintainer="support@mongodb.com" USER 2000 CMD ["/agent/mongodb-agent", "-cluster=/var/lib/automation/config/automation-config.json"] diff --git a/docker/mongodb-agent/README.md b/docker/mongodb-agent/README.md index a447d60f0..f8ec6ea20 100644 --- a/docker/mongodb-agent/README.md +++ b/docker/mongodb-agent/README.md @@ -8,13 +8,31 @@ binaries from there. Then we continue with the other steps to fully build the im For building the MongoDB Agent image locally use the example command: ```bash -VERSION="108.0.7.8810-1" -INIT_DATABASE_IMAGE="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-database:1.1.0" -MONGODB_TOOLS_URL_UBI="https://downloads.mongodb.org/tools/db/mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" -MONGODB_AGENT_URL_UBI="https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-108.0.7.8810-1.rhel9_x86_64.tar.gz" -docker buildx build --load --progress plain . 
-f docker/mongodb-agent/Dockerfile -t "mongodb-agent:${VERSION}_1.1.0" \ - --build-arg version="${VERSION}" \ - --build-arg init_database_image="${INIT_DATABASE_IMAGE}" \ - --build-arg mongodb_tools_url_ubi="${MONGODB_TOOLS_URL_UBI}" \ - --build-arg mongodb_agent_url_ubi="${MONGODB_AGENT_URL_UBI}" +VERSION="evergreen" +AGENT_VERSION="108.0.7.8810-1" +TOOLS_VERSION="100.12.0" +MONGODB_TOOLS_URL="https://downloads.mongodb.org/tools/db" +MONGODB_AGENT_URL="https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod" +BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/" +INIT_DATABASE_IMAGE="${BASE_REPO_URL}mongodb-kubernetes-init-database:${VERSION}" +MONGODB_AGENT_BASE="mongodb-mms-automation-agent" +MONGODB_DATABASE_TOOLS_BASE="mongodb-database-tools" + + +docker buildx build --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . -f docker/mongodb-agent/Dockerfile -t "${BASE_REPO_URL}mongodb-agent:${AGENT_VERSION}_${VERSION}" \ + --build-arg version="${VERSION}" \ + --build-arg init_database_image="${INIT_DATABASE_IMAGE}" \ + --build-arg mongodb_tools_url="${MONGODB_TOOLS_URL}" \ + --build-arg mongodb_agent_url="${MONGODB_AGENT_URL}" \ + --build-arg mongodb_agent_version_s390x="${MONGODB_AGENT_BASE}-${AGENT_VERSION}.rhel7_s390x.tar.gz" \ + --build-arg mongodb_agent_version_ppc64le="${MONGODB_AGENT_BASE}-${AGENT_VERSION}.rhel8_ppc64le.tar.gz" \ + --build-arg mongodb_agent_version_amd64="${MONGODB_AGENT_BASE}-${AGENT_VERSION}.linux_x86_64.tar.gz" \ + --build-arg mongodb_agent_version_arm64="${MONGODB_AGENT_BASE}-${AGENT_VERSION}.amzn2_aarch64.tar.gz" \ + --build-arg mongodb_tools_version_arm64="${MONGODB_DATABASE_TOOLS_BASE}-rhel93-aarch64-${TOOLS_VERSION}.tgz" \ + --build-arg mongodb_tools_version_amd64="${MONGODB_DATABASE_TOOLS_BASE}-rhel93-x86_64-${TOOLS_VERSION}.tgz" \ + --build-arg mongodb_tools_version_s390x="${MONGODB_DATABASE_TOOLS_BASE}-rhel9-s390x-${TOOLS_VERSION}.tgz" 
\ + --build-arg mongodb_tools_version_ppc64le="${MONGODB_DATABASE_TOOLS_BASE}-rhel9-ppc64le-${TOOLS_VERSION}.tgz" + +docker push "${BASE_REPO_URL}mongodb-agent:${AGENT_VERSION}_${VERSION}" + ``` diff --git a/docker/mongodb-community-tests/Dockerfile b/docker/mongodb-community-tests/Dockerfile index b568ff77f..0234b7e27 100644 --- a/docker/mongodb-community-tests/Dockerfile +++ b/docker/mongodb-community-tests/Dockerfile @@ -6,9 +6,7 @@ # # Ref: https://cryptography.io/en/latest/installation/#building-cryptography-on-linux # -ARG GOLANG_VERSION - -FROM public.ecr.aws/docker/library/golang:${GOLANG_VERSION} as builder +FROM public.ecr.aws/docker/library/golang:1.24 as builder ENV GO111MODULE=on ENV GOPATH "" diff --git a/docker/mongodb-kubernetes-database/README.md b/docker/mongodb-kubernetes-database/README.md index e7b937e0e..7dd70a6fa 100644 --- a/docker/mongodb-kubernetes-database/README.md +++ b/docker/mongodb-kubernetes-database/README.md @@ -39,7 +39,10 @@ this images with. For building the MongoDB Database image locally use the example command: ```bash -VERSION="1.0.1" -docker buildx build --load --progress plain . -f docker/mongodb-kubernetes-database/Dockerfile -t "mongodb-kubernetes-database:${VERSION}" \ +VERSION="1.3.0" +BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/" +docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . 
-f docker/mongodb-kubernetes-database/Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes-database:${VERSION}" \ --build-arg VERSION="${VERSION}" + +docker push "${BASE_REPO_URL}mongodb-kubernetes-database:${VERSION}" ``` diff --git a/docker/mongodb-kubernetes-init-appdb/Dockerfile.atomic b/docker/mongodb-kubernetes-init-appdb/Dockerfile.atomic index ed0cea9dd..704361276 100644 --- a/docker/mongodb-kubernetes-init-appdb/Dockerfile.atomic +++ b/docker/mongodb-kubernetes-init-appdb/Dockerfile.atomic @@ -1,19 +1,18 @@ -FROM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder +FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder + +ARG TARGETOS +ARG TARGETARCH COPY . /go/src/github.com/mongodb/mongodb-kubernetes WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes -RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go -RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go +RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go +RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go FROM scratch AS base -ARG mongodb_tools_url_ubi - COPY --from=readiness_builder /readinessprobe /data/ COPY --from=readiness_builder /version-upgrade-hook /data/version-upgrade-hook -ADD ${mongodb_tools_url_ubi} /data/mongodb_tools_ubi.tgz - COPY ./docker/mongodb-kubernetes-init-database/content/probe.sh /data/probe.sh COPY ./docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh /data/scripts/ @@ -23,6 +22,7 @@ COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/licenses/ FROM registry.access.redhat.com/ubi8/ubi-minimal +ARG TARGETPLATFORM ARG version LABEL 
name="MongoDB Kubernetes Init AppDB" \ version="mongodb-kubernetes-init-appdb-${version}" \ @@ -42,7 +42,20 @@ RUN microdnf -y update --nodocs \ && microdnf -y install --nodocs tar gzip \ && microdnf clean all -COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz +ARG mongodb_tools_url +ARG mongodb_tools_version_s390x +ARG mongodb_tools_version_ppc64le +ARG mongodb_tools_version_amd64 +ARG mongodb_tools_version_arm64 + +RUN case ${TARGETPLATFORM} in \ + "linux/amd64") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_amd64} ;; \ + "linux/arm64") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_arm64} ;; \ + "linux/s390x") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_s390x} ;; \ + "linux/ppc64le") export MONGODB_TOOLS_VERSION=${mongodb_tools_version_ppc64le} ;; \ + esac \ + && mkdir -p /tools \ + && curl -o /tools/mongodb_tools.tgz "${mongodb_tools_url}/${MONGODB_TOOLS_VERSION}" RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ && rm /tools/mongodb_tools.tgz diff --git a/docker/mongodb-kubernetes-init-appdb/README.md b/docker/mongodb-kubernetes-init-appdb/README.md index d49ca4b3a..f3d51eb1c 100644 --- a/docker/mongodb-kubernetes-init-appdb/README.md +++ b/docker/mongodb-kubernetes-init-appdb/README.md @@ -3,9 +3,14 @@ For building the MongoDB Init AppDB image locally use the example command: ```bash -VERSION="1.0.1" -MONGODB_TOOLS_URL_UBI="https://downloads.mongodb.org/tools/db/mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" -docker buildx build --load --progress plain . -f docker/mongodb-kubernetes-init-appdb/Dockerfile -t "mongodb-kubernetes-init-appdb:${VERSION}" \ +VERSION="1.3.0" +MONGODB_TOOLS_URL="https://downloads.mongodb.org/tools/db" +BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/" +docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . 
-f docker/mongodb-kubernetes-init-appdb/Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes-init-appdb:${VERSION}" \ --build-arg version="${VERSION}" \ - --build-arg mongodb_tools_url_ubi="${MONGODB_TOOLS_URL_UBI}" + --build-arg mongodb_tools_url="${MONGODB_TOOLS_URL}" \ + --build-arg mongodb_tools_version_arm64="mongodb-database-tools-rhel93-aarch64-100.12.0.tgz" \ + --build-arg mongodb_tools_version_amd64="mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" \ + --build-arg mongodb_tools_version_s390x="mongodb-database-tools-rhel9-s390x-100.12.0.tgz" \ + --build-arg mongodb_tools_version_ppc64le="mongodb-database-tools-rhel9-ppc64le-100.12.0.tgz" ``` diff --git a/docker/mongodb-kubernetes-init-database/Dockerfile.atomic b/docker/mongodb-kubernetes-init-database/Dockerfile.atomic index 6c861fb6a..3f38f7870 100644 --- a/docker/mongodb-kubernetes-init-database/Dockerfile.atomic +++ b/docker/mongodb-kubernetes-init-database/Dockerfile.atomic @@ -1,18 +1,38 @@ -FROM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder +FROM scratch AS tools_downloader -COPY . 
/go/src/github.com/mongodb/mongodb-kubernetes -WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes -RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go -RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go +ARG mongodb_tools_url -FROM scratch AS base +ARG mongodb_tools_version_amd64 +ADD "${mongodb_tools_url}/${mongodb_tools_version_amd64}" /data/amd64/mongodb_tools.tgz -ARG mongodb_tools_url_ubi +ARG mongodb_tools_version_arm64 +ADD "${mongodb_tools_url}/${mongodb_tools_version_arm64}" /data/arm64/mongodb_tools.tgz -COPY --from=readiness_builder /readinessprobe /data/ -COPY --from=readiness_builder /version-upgrade-hook /data/version-upgrade-hook +ARG mongodb_tools_version_s390x +ADD "${mongodb_tools_url}/${mongodb_tools_version_s390x}" /data/s390x/mongodb_tools.tgz + +ARG mongodb_tools_version_ppc64le +ADD "${mongodb_tools_url}/${mongodb_tools_version_ppc64le}" /data/ppc64le/mongodb_tools.tgz + +FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS readiness_builder + +WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes/ + +COPY go.mod go.sum ./ + +RUN go mod download + +COPY mongodb-community-operator /go/src/github.com/mongodb/mongodb-kubernetes/mongodb-community-operator + +ARG TARGETOS +ARG TARGETARCH +RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go +RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go -ADD ${mongodb_tools_url_ubi} /data/mongodb_tools_ubi.tgz +FROM scratch AS base + +COPY --from=readiness_builder /readinessprobe /data/readinessprobe +COPY --from=readiness_builder /version-upgrade-hook /data/version-upgrade-hook COPY 
./docker/mongodb-kubernetes-init-database/content/probe.sh /data/probe.sh @@ -21,8 +41,24 @@ COPY ./docker/mongodb-kubernetes-init-database/content/agent-launcher.sh /data/s COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/licenses/ +#TODO ubi9? FROM registry.access.redhat.com/ubi8/ubi-minimal +ARG TARGETARCH +COPY --from=tools_downloader /data/${TARGETARCH}/mongodb_tools.tgz /tools/mongodb_tools.tgz + +RUN microdnf -y update --nodocs \ + && microdnf -y install --nodocs tar gzip \ + && microdnf clean all + +RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ + && rm /tools/mongodb_tools.tgz + +COPY --from=base /data/readinessprobe /probes/readinessprobe +COPY --from=base /data/probe.sh /probes/probe.sh +COPY --from=base /data/scripts/ /scripts/ +COPY --from=base /data/licenses /licenses/ + ARG version LABEL name="MongoDB Kubernetes Init Database" \ version="mongodb-kubernetes-init-database-${version}" \ @@ -32,19 +68,6 @@ LABEL name="MongoDB Kubernetes Init Database" \ vendor="MongoDB" \ maintainer="support@mongodb.com" -COPY --from=base /data/readinessprobe /probes/readinessprobe -COPY --from=base /data/probe.sh /probes/probe.sh -COPY --from=base /data/scripts/ /scripts/ -COPY --from=base /data/licenses /licenses/ - -RUN microdnf -y update --nodocs \ - && microdnf -y install --nodocs tar gzip \ - && microdnf clean all - -COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz - -RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ - && rm /tools/mongodb_tools.tgz - USER 2000 + ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] diff --git a/docker/mongodb-kubernetes-init-database/README.md b/docker/mongodb-kubernetes-init-database/README.md index 0e6657531..04571f284 100644 --- a/docker/mongodb-kubernetes-init-database/README.md +++ b/docker/mongodb-kubernetes-init-database/README.md @@ -3,9 +3,20 @@ 
For building the MongoDB Init AppDB image locally use the example command: ```bash -VERSION="1.0.1" -MONGODB_TOOLS_URL_UBI="https://downloads.mongodb.org/tools/db/mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" -docker buildx build --load --progress plain . -f docker/mongodb-kubernetes-init-database/Dockerfile -t "mongodb-kubernetes-init-database:${VERSION}" \ +VERSION="evergreen" +TOOLS_VERSION="100.12.0" +MONGODB_TOOLS_URL_UBI="https://downloads.mongodb.org/tools/db" +BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/" +docker buildx build --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . -f docker/mongodb-kubernetes-init-database/Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes-init-database:${VERSION}" \ --build-arg version="${VERSION}" \ - --build-arg mongodb_tools_url_ubi="${MONGODB_TOOLS_URL_UBI}" + --build-arg mongodb_tools_url="${MONGODB_TOOLS_URL_UBI}" \ + --build-arg mongodb_tools_version_arm64="mongodb-database-tools-rhel93-aarch64-${TOOLS_VERSION}.tgz" \ + --build-arg mongodb_tools_version_amd64="mongodb-database-tools-rhel93-x86_64-${TOOLS_VERSION}.tgz" \ + --build-arg mongodb_tools_version_s390x="mongodb-database-tools-rhel9-s390x-${TOOLS_VERSION}.tgz" \ + --build-arg mongodb_tools_version_ppc64le="mongodb-database-tools-rhel9-ppc64le-${TOOLS_VERSION}.tgz" + +docker push "${BASE_REPO_URL}mongodb-kubernetes-init-database:${VERSION}" ``` + +first no cache 2:20.28 total +second no cache 2:31.74 total diff --git a/docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh b/docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh index eaed81cf0..aba8ca152 100755 --- a/docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh +++ b/docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh @@ -91,10 +91,34 @@ download_agent() { AGENT_VERSION="${MDB_AGENT_VERSION}" fi + # Detect architecture for agent download + local detected_arch + 
detected_arch=$(uname -m) + + case "${detected_arch}" in + x86_64) + AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.linux_x86_64.tar.gz" + ;; + aarch64|arm64) + AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.amzn2_aarch64.tar.gz" + ;; + ppc64le) + AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel8_ppc64le.tar.gz" + ;; + s390x) + AGENT_FILE="mongodb-mms-automation-agent-${AGENT_VERSION}.rhel7_s390x.tar.gz" + ;; + *) + script_log "Error: Unsupported architecture for MongoDB agent: ${detected_arch}" + exit 1 + ;; + esac + script_log "Downloading Agent version: ${AGENT_VERSION}" script_log "Downloading a Mongodb Agent from ${base_url:?}" curl_opts=( - "${base_url}/download/agent/automation/mongodb-mms-automation-agent-${AGENT_VERSION}.linux_x86_64.tar.gz" + "${base_url}/download/agent/automation/${AGENT_FILE}" + "--location" "--silent" "--retry" "3" "--fail" "-v" "--output" "automation-agent.tar.gz" ); @@ -117,13 +141,15 @@ download_agent() { rm "${MMS_LOG_DIR}/curl.log" 2>/dev/null || true script_log "The Mongodb Agent binary downloaded, unpacking" + + mkdir -p "${MMS_HOME}/files" tar -xzf automation-agent.tar.gz AGENT_VERSION=$(find . 
-name "mongodb-mms-automation-agent-*" | awk -F"-" '{ print $5 }') - mkdir -p "${MMS_HOME}/files" - echo "${AGENT_VERSION}" >"${MMS_HOME}/files/agent-version" mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent "${MMS_HOME}/files/" + rm -rf automation-agent.tar.gz mongodb-mms-automation-agent-*.* + + echo "${AGENT_VERSION}" >"${MMS_HOME}/files/agent-version" chmod +x "${MMS_HOME}/files/mongodb-mms-automation-agent" - rm -rf automation-agent.tar.gz mongodb-mms-automation-agent-*.linux_x86_64 script_log "The Automation Agent was deployed at ${MMS_HOME}/files/mongodb-mms-automation-agent" popd >/dev/null || true diff --git a/docker/mongodb-kubernetes-operator/Dockerfile.atomic b/docker/mongodb-kubernetes-operator/Dockerfile.atomic index dcd3af35c..8253c5e4f 100644 --- a/docker/mongodb-kubernetes-operator/Dockerfile.atomic +++ b/docker/mongodb-kubernetes-operator/Dockerfile.atomic @@ -1,8 +1,8 @@ -FROM public.ecr.aws/docker/library/golang:1.24 AS builder +FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS builder -ARG version -ARG log_automation_config_diff -ARG use_race +ARG BUILDARCH +ADD "https://github.com/stedolan/jq/releases/download/jq-1.8.1/jq-linux-${BUILDARCH}" /usr/local/bin/jq +RUN chmod +x /usr/local/bin/jq COPY go.sum go.mod /go/src/github.com/mongodb/mongodb-kubernetes/ @@ -11,28 +11,28 @@ RUN go mod download COPY . 
/go/src/github.com/mongodb/mongodb-kubernetes -RUN go version -RUN git version +ARG version +ARG log_automation_config_diff +ARG use_race +ARG TARGETOS +ARG TARGETARCH + RUN mkdir /build && \ if [ $use_race = "true" ]; then \ echo "Building with race detector" && \ - CGO_ENABLED=1 go build -o /build/mongodb-kubernetes-operator \ + CGO_ENABLED=1 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /build/mongodb-kubernetes-operator \ -buildvcs=false \ -race \ -ldflags=" -X github.com/mongodb/mongodb-kubernetes/pkg/util.OperatorVersion=${version} \ -X github.com/mongodb/mongodb-kubernetes/pkg/util.LogAutomationConfigDiff=${log_automation_config_diff}"; \ else \ echo "Building without race detector" && \ - CGO_ENABLED=0 go build -o /build/mongodb-kubernetes-operator \ + CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /build/mongodb-kubernetes-operator \ -buildvcs=false \ -ldflags="-s -w -X github.com/mongodb/mongodb-kubernetes/pkg/util.OperatorVersion=${version} \ -X github.com/mongodb/mongodb-kubernetes/pkg/util.LogAutomationConfigDiff=${log_automation_config_diff}"; \ fi - -ADD https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 /usr/local/bin/jq -RUN chmod +x /usr/local/bin/jq - RUN mkdir -p /data RUN cat release.json | jq -r '.supportedImages."mongodb-agent" | { "supportedImages": { "mongodb-agent": . 
} }' > /data/om_version_mapping.json RUN chmod +r /data/om_version_mapping.json @@ -46,16 +46,6 @@ ADD docker/mongodb-kubernetes-operator/licenses /data/licenses/ FROM registry.access.redhat.com/ubi9/ubi-minimal -ARG version - -LABEL name="MongoDB Kubernetes Operator" \ - maintainer="support@mongodb.com" \ - vendor="MongoDB" \ - version="${version}" \ - release="1" \ - summary="MongoDB Kubernetes Operator Image" \ - description="MongoDB Kubernetes Operator Image" - # Building an UBI-based image: https://red.ht/3n6b9y0 RUN microdnf update \ --disableplugin=subscription-manager \ @@ -67,6 +57,16 @@ COPY --from=base /data/mongodb-kubernetes-operator /usr/local/bin/mongodb-kubern COPY --from=base /data/om_version_mapping.json /usr/local/om_version_mapping.json COPY --from=base /data/licenses /licenses/ +ARG version + +LABEL name="MongoDB Kubernetes Operator" \ + maintainer="support@mongodb.com" \ + vendor="MongoDB" \ + version="${version}" \ + release="1" \ + summary="MongoDB Kubernetes Operator Image" \ + description="MongoDB Kubernetes Operator Image" + USER 2000 ENTRYPOINT exec /usr/local/bin/mongodb-kubernetes-operator diff --git a/docker/mongodb-kubernetes-operator/README.md b/docker/mongodb-kubernetes-operator/README.md index 8335c1d79..546ed893c 100644 --- a/docker/mongodb-kubernetes-operator/README.md +++ b/docker/mongodb-kubernetes-operator/README.md @@ -13,13 +13,16 @@ CGO_ENABLED=0 GOOS=linux GOFLAGS="-mod=vendor" go build -i -o mongodb-kubernetes For building the MongoDB Init Ops Manager image locally use the example command: ```bash -VERSION="1.1.0" +VERSION="evergreen" LOG_AUTOMATION_CONFIG_DIFF="false" USE_RACE="false" -docker buildx build --load --progress plain . -f docker/mongodb-kubernetes-operator/Dockerfile -t "mongodb-kubernetes-operator:${VERSION}" \ +BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/" +docker buildx build --load --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . 
-f docker/mongodb-kubernetes-operator/Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes:${VERSION}" \ --build-arg version="${VERSION}" \ --build-arg log_automation_config_diff="${LOG_AUTOMATION_CONFIG_DIFF}" \ --build-arg use_race="${USE_RACE}" + +docker push "${BASE_REPO_URL}mongodb-kubernetes:${VERSION}" ``` ### Running locally diff --git a/docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic b/docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic index 7466ece2b..55d661438 100644 --- a/docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic +++ b/docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic @@ -1,10 +1,15 @@ -FROM public.ecr.aws/docker/library/golang:1.24 AS builder +FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS builder -WORKDIR /go/src -ADD . . +WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes/ + +COPY go.mod go.sum ./ + +RUN go mod download + +COPY mongodb-community-operator /go/src/github.com/mongodb/mongodb-kubernetes/mongodb-community-operator -ARG TARGETARCH ARG TARGETOS +ARG TARGETARCH RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -a -o /data/scripts/readinessprobe ./mongodb-community-operator/cmd/readiness/main.go FROM registry.access.redhat.com/ubi9/ubi-minimal diff --git a/docker/mongodb-kubernetes-tests/Dockerfile b/docker/mongodb-kubernetes-tests/Dockerfile index 424f5ee76..5347a8f19 100644 --- a/docker/mongodb-kubernetes-tests/Dockerfile +++ b/docker/mongodb-kubernetes-tests/Dockerfile @@ -8,19 +8,37 @@ # ARG PYTHON_VERSION -FROM --platform=linux/amd64 public.ecr.aws/docker/library/python:${PYTHON_VERSION}-slim as builder - +FROM public.ecr.aws/docker/library/python:${PYTHON_VERSION}-slim as builder RUN apt-get -qq update \ && apt-get -y -qq install \ - curl libldap2-dev libsasl2-dev build-essential git + curl libldap2-dev libsasl2-dev build-essential git libssl-dev pkg-config COPY requirements.txt requirements.txt -RUN python3 -m venv /venv && . 
/venv/bin/activate && python3 -m pip install -r requirements.txt +RUN python3 -m venv /venv && . /venv/bin/activate && pip install --upgrade pip && GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=1 pip install -r requirements.txt + +FROM scratch AS tools_downloader + +ARG mongodb_tools_url="https://fastdl.mongodb.org/tools/db" + +ARG mongodb_tools_version_amd64="mongodb-database-tools-rhel93-x86_64-100.12.0.tgz" +ADD "${mongodb_tools_url}/${mongodb_tools_version_amd64}" /data/amd64/mongodb_tools.tgz + +ARG mongodb_tools_version_arm64="mongodb-database-tools-rhel93-aarch64-100.12.0.tgz" +ADD "${mongodb_tools_url}/${mongodb_tools_version_arm64}" /data/arm64/mongodb_tools.tgz +ARG mongodb_tools_version_s390x="mongodb-database-tools-rhel9-s390x-100.12.0.tgz" +ADD "${mongodb_tools_url}/${mongodb_tools_version_s390x}" /data/s390x/mongodb_tools.tgz -FROM --platform=linux/amd64 public.ecr.aws/docker/library/python:${PYTHON_VERSION}-slim +ARG mongodb_tools_version_ppc64le="mongodb-database-tools-rhel9-ppc64le-100.12.0.tgz" +ADD "${mongodb_tools_url}/${mongodb_tools_version_ppc64le}" /data/ppc64le/mongodb_tools.tgz + + +FROM public.ecr.aws/docker/library/python:${PYTHON_VERSION}-slim + +ARG TARGETARCH +COPY --from=tools_downloader "/data/${TARGETARCH}/mongodb_tools.tgz" /tmp/mongodb-tools.tgz RUN apt-get -qq update \ && apt-get -y -qq install \ @@ -30,20 +48,22 @@ RUN apt-get -qq update \ git \ openssl -ENV HELM_NAME "helm-v3.17.1-linux-amd64.tar.gz" -# install Helm -RUN curl --fail --retry 3 -L -o "${HELM_NAME}" "https://get.helm.sh/${HELM_NAME}" \ - && tar -xzf "${HELM_NAME}" \ - && rm "${HELM_NAME}" \ - && mv "linux-amd64/helm" "/usr/local/bin/helm" - -ADD https://fastdl.mongodb.org/tools/db/mongodb-database-tools-ubuntu2204-x86_64-100.12.0.tgz /tmp/mongodb-tools.tgz RUN mkdir -p /tmp/mongodb-tools && \ tar xfz /tmp/mongodb-tools.tgz -C /tmp/mongodb-tools && \ cp /tmp/mongodb-tools/*/bin/* /usr/local/bin/ && \ rm -rf /tmp/mongodb-tools /tmp/mongodb-tools.tgz -RUN curl -LO 
"https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl" \ + +ENV HELM_NAME "helm-v3.17.1-linux-${TARGETARCH}.tar.gz" + +# install Helm +RUN curl --fail --retry 3 -L -o "${HELM_NAME}" "https://get.helm.sh/${HELM_NAME}" \ + && tar -xzf "${HELM_NAME}" \ + && rm "${HELM_NAME}" \ + && mv "linux-${TARGETARCH}/helm" "/usr/local/bin/helm" + +# install kubectl +RUN curl -LO "https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/${TARGETARCH}/kubectl" \ && chmod +x ./kubectl \ && mv ./kubectl /usr/local/bin/kubectl @@ -62,4 +82,4 @@ COPY release.json /release.json # we use the public directory to automatically test resources samples COPY public /mongodb-kubernetes/public -ADD multi-cluster-kube-config-creator_linux /usr/local/bin/multi-cluster-kube-config-creator +ADD "multi-cluster-kube-config-creator_${TARGETARCH}" /usr/local/bin/multi-cluster-kube-config-creator diff --git a/docker/mongodb-kubernetes-tests/README.md b/docker/mongodb-kubernetes-tests/README.md index dec9ac764..e09b77a0e 100644 --- a/docker/mongodb-kubernetes-tests/README.md +++ b/docker/mongodb-kubernetes-tests/README.md @@ -106,6 +106,18 @@ indicate which test classes need to be run. But for now they help us to call a particular E2E task we are interested in. +## Building test image + +```bash +make prepare-local-e2e +cd docker/mongodb-kubernetes-tests +BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa/" +docker buildx build --progress plain --platform linux/amd64,linux/arm64,linux/s390x,linux/ppc64le . 
-f Dockerfile -t "${BASE_REPO_URL}mongodb-kubernetes-tests:evergreen" \ + --build-arg PYTHON_VERSION="3.13" + +docker push "${BASE_REPO_URL}mongodb-kubernetes-tests:evergreen" +``` + # Writing New Tests # ### Create a new Python test file ### diff --git a/docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic b/docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic index 31aa3a1ac..fab594d5e 100644 --- a/docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic +++ b/docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic @@ -1,7 +1,12 @@ -FROM public.ecr.aws/docker/library/golang:1.24 AS builder +FROM --platform=$BUILDPLATFORM public.ecr.aws/docker/library/golang:1.24 AS builder -WORKDIR /go/src -ADD . . +WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes/ + +COPY go.mod go.sum ./ + +RUN go mod download + +COPY mongodb-community-operator /go/src/github.com/mongodb/mongodb-kubernetes/mongodb-community-operator ARG TARGETARCH ARG TARGETOS diff --git a/pipeline.py b/pipeline.py index 2aac7fe40..42eeadddf 100755 --- a/pipeline.py +++ b/pipeline.py @@ -46,12 +46,12 @@ get_supported_operator_versions, get_supported_version_for_image_matrix_handling, ) -from scripts.evergreen.release.images_signing import ( +from scripts.evergreen.release.sbom import generate_sbom, generate_sbom_for_cli +from scripts.release.build.image_signing import ( mongodb_artifactory_login, sign_image, verify_signature, ) -from scripts.evergreen.release.sbom import generate_sbom, generate_sbom_for_cli TRACER = trace.get_tracer("evergreen-agent") diff --git a/pipeline_test.py b/pipeline_test.py index dab707faa..7c915c918 100644 --- a/pipeline_test.py +++ b/pipeline_test.py @@ -14,7 +14,7 @@ is_version_in_range, operator_build_configuration, ) -from scripts.evergreen.release.images_signing import run_command_with_retries +from scripts.release.build.image_signing import run_command_with_retries release_json = { "supportedImages": { diff --git a/scripts/dev/configure_container_auth.sh 
b/scripts/dev/configure_container_auth.sh new file mode 100755 index 000000000..9464ba409 --- /dev/null +++ b/scripts/dev/configure_container_auth.sh @@ -0,0 +1,167 @@ +#!/usr/bin/env bash + +set -Eeou pipefail +test "${MDB_BASH_DEBUG:-0}" -eq 1 && set -x + +source scripts/dev/set_env_context.sh +source scripts/funcs/checks +source scripts/funcs/printing +source scripts/funcs/kubernetes + +CONTAINER_RUNTIME="${CONTAINER_RUNTIME-"docker"}" + +setup_validate_container_runtime() { + case "${CONTAINER_RUNTIME}" in + "podman") + if ! command -v podman &> /dev/null; then + echo "Error: Podman is not available but was specified" + exit 1 + fi + USE_SUDO=true + CONFIG_PATH="/root/.config/containers/auth.json" + echo "Using Podman for container authentication (sudo mode)" + ;; + "docker") + if ! command -v docker &> /dev/null; then + echo "Error: Docker is not available but was specified" + exit 1 + fi + USE_SUDO=false + CONFIG_PATH="${HOME}/.docker/config.json" + echo "Using Docker for container authentication" + ;; + *) + echo "Error: Invalid container runtime '${CONTAINER_RUNTIME}'. Must be 'docker' or 'podman'" + exit 1 + ;; + esac + + if [[ "$USE_SUDO" == "true" ]]; then + sudo mkdir -p "$(dirname "${CONFIG_PATH}")" + else + mkdir -p "$(dirname "${CONFIG_PATH}")" + fi +} + +# Wrapper function to execute commands with or without sudo +exec_cmd() { + if [[ "$USE_SUDO" == "true" ]]; then + sudo env PATH="$PATH" "$@" + else + "$@" + fi +} + +# Wrapper function to read files with or without sudo +read_file() { + local file="$1" + if [[ "$USE_SUDO" == "true" ]]; then + sudo cat "$file" + else + cat "$file" + fi +} + +# Wrapper function to write files with or without sudo +write_file() { + local content="$1" + local file="$2" + if [[ "$USE_SUDO" == "true" ]]; then + echo "$content" | sudo tee "$file" > /dev/null + else + echo "$content" > "$file" + fi +} + +remove_element() { + local config_option="$1" + local tmpfile + tmpfile=$(mktemp) + + if [[ ! 
-f "${CONFIG_PATH}" ]]; then + write_file '{}' "${CONFIG_PATH}" + fi + + exec_cmd jq 'del(.'"${config_option}"')' "${CONFIG_PATH}" > "${tmpfile}" + exec_cmd cp "${tmpfile}" "${CONFIG_PATH}" + rm "${tmpfile}" +} + +container_login() { + local username="$1" + local registry="$2" + + if [[ "${CONTAINER_RUNTIME}" == "podman" ]]; then + exec_cmd podman login --authfile "${CONFIG_PATH}" --username "${username}" --password-stdin "${registry}" + else + docker login --username "${username}" --password-stdin "${registry}" + fi +} + +setup_validate_container_runtime + +if [[ ! -f "${CONFIG_PATH}" ]]; then + write_file '{}' "${CONFIG_PATH}" +fi + +if [[ -f "${CONFIG_PATH}" ]]; then + if [[ "${RUNNING_IN_EVG:-"false"}" != "true" ]]; then + echo "Checking if container registry credentials are valid..." + ecr_auth=$(exec_cmd jq -r '.auths."268558157000.dkr.ecr.us-east-1.amazonaws.com".auth // empty' "${CONFIG_PATH}") + + if [[ -n "${ecr_auth}" ]]; then + http_status=$(curl --head -s -o /dev/null -w "%{http_code}" --max-time 3 "https://268558157000.dkr.ecr.us-east-1.amazonaws.com/v2/dev/mongodb-kubernetes/manifests/latest" \ + -H "Authorization: Basic ${ecr_auth}" 2>/dev/null || echo "error/timeout") + + if [[ "${http_status}" != "401" && "${http_status}" != "403" && "${http_status}" != "error/timeout" ]]; then + echo "Container registry credentials are up to date - not performing the new login!" 
+ exit + fi + echo "Container login required (HTTP status: ${http_status})" + else + echo "No ECR credentials found in container config - login required" + fi + fi + + title "Performing container login to ECR registries" + + # There could be some leftovers on Evergreen (Docker-specific, skip for Podman) + if [[ "${CONTAINER_RUNTIME}" == "docker" ]]; then + if exec_cmd grep -q "credsStore" "${CONFIG_PATH}"; then + remove_element "credsStore" + fi + if exec_cmd grep -q "credHelpers" "${CONFIG_PATH}"; then + remove_element "credHelpers" + fi + fi +fi + + +echo "$(aws --version)" + +aws ecr get-login-password --region "us-east-1" | container_login "AWS" "268558157000.dkr.ecr.us-east-1.amazonaws.com" + +# by default docker tries to store credentials in an external storage (e.g. OS keychain) - not in the config.json +# We need to store it as base64 string in config.json instead so we need to remove the "credsStore" element +# This is Docker-specific behavior, Podman stores credentials directly in auth.json +if [[ "${CONTAINER_RUNTIME}" == "docker" ]] && exec_cmd grep -q "credsStore" "${CONFIG_PATH}"; then + remove_element "credsStore" + + # login again to store the credentials into the config.json + aws ecr get-login-password --region "us-east-1" | container_login "AWS" "268558157000.dkr.ecr.us-east-1.amazonaws.com" +fi + +aws ecr get-login-password --region "eu-west-1" | container_login "AWS" "268558157000.dkr.ecr.eu-west-1.amazonaws.com" + +if [[ -n "${COMMUNITY_PRIVATE_PREVIEW_PULLSECRET_DOCKERCONFIGJSON:-}" ]]; then + # log in to quay.io for the mongodb/mongodb-search-community private repo + # TODO remove once we switch to the official repo in Public Preview + quay_io_auth_file=$(mktemp) + config_tmp=$(mktemp) + echo "${COMMUNITY_PRIVATE_PREVIEW_PULLSECRET_DOCKERCONFIGJSON}" | base64 -d > "${quay_io_auth_file}" + exec_cmd jq -s '.[0] * .[1]' "${quay_io_auth_file}" "${CONFIG_PATH}" > "${config_tmp}" + exec_cmd mv "${config_tmp}" "${CONFIG_PATH}" + rm 
"${quay_io_auth_file}" +fi + +create_image_registries_secret diff --git a/scripts/dev/configure_docker_auth.sh b/scripts/dev/configure_docker_auth.sh deleted file mode 100755 index dfcb14f0b..000000000 --- a/scripts/dev/configure_docker_auth.sh +++ /dev/null @@ -1,105 +0,0 @@ -#!/usr/bin/env bash - -set -Eeou pipefail -test "${MDB_BASH_DEBUG:-0}" -eq 1 && set -x - -source scripts/dev/set_env_context.sh -source scripts/funcs/checks -source scripts/funcs/printing -source scripts/funcs/kubernetes - -check_docker_daemon_is_running() { - if [[ "$(uname -s)" != "Linux" ]]; then - echo "Skipping docker daemon check when not running in Linux" - return 0 - fi - - if systemctl is-active --quiet docker; then - echo "Docker is already running." - else - echo "Docker is not running. Starting Docker..." - # Start the Docker daemon - sudo systemctl start docker - for _ in {1..15}; do - if systemctl is-active --quiet docker; then - echo "Docker started successfully." - return 0 - fi - echo "Waiting for Docker to start..." - sleep 3 - done - fi -} - -remove_element() { - config_option="${1}" - tmpfile=$(mktemp) - jq 'del(.'"${config_option}"')' ~/.docker/config.json >"${tmpfile}" - cp "${tmpfile}" ~/.docker/config.json - rm "${tmpfile}" -} - -# This is the script which performs docker authentication to different registries that we use (so far ECR and RedHat) -# As the result of this login the ~/.docker/config.json will have all the 'auth' information necessary to work with docker registries - -check_docker_daemon_is_running - -if [[ -f ~/.docker/config.json ]]; then - if [[ "${RUNNING_IN_EVG:-"false"}" != "true" ]]; then - # Check if login is actually required by making a HEAD request to ECR using existing Docker config - echo "Checking if Docker credentials are valid..." 
- ecr_auth=$(jq -r '.auths."268558157000.dkr.ecr.us-east-1.amazonaws.com".auth // empty' ~/.docker/config.json) - - if [[ -n "${ecr_auth}" ]]; then - http_status=$(curl --head -s -o /dev/null -w "%{http_code}" --max-time 3 "https://268558157000.dkr.ecr.us-east-1.amazonaws.com/v2/dev/mongodb-kubernetes/manifests/latest" \ - -H "Authorization: Basic ${ecr_auth}" 2>/dev/null || echo "error/timeout") - - if [[ "${http_status}" != "401" && "${http_status}" != "403" && "${http_status}" != "error/timeout" ]]; then - echo "Docker credentials are up to date - not performing the new login!" - exit - fi - echo "Docker login required (HTTP status: ${http_status})" - else - echo "No ECR credentials found in Docker config - login required" - fi - fi - - title "Performing docker login to ECR registries" - - # There could be some leftovers on Evergreen - if grep -q "credsStore" ~/.docker/config.json; then - remove_element "credsStore" - fi - if grep -q "credHelpers" ~/.docker/config.json; then - remove_element "credHelpers" - fi -fi - - -echo "$(aws --version)}" - -aws ecr get-login-password --region "us-east-1" | docker login --username AWS --password-stdin 268558157000.dkr.ecr.us-east-1.amazonaws.com - -# by default docker tries to store credentials in an external storage (e.g. 
OS keychain) - not in the config.json -# We need to store it as base64 string in config.json instead so we need to remove the "credsStore" element -if grep -q "credsStore" ~/.docker/config.json; then - remove_element "credsStore" - - # login again to store the credentials into the config.json - aws ecr get-login-password --region "us-east-1" | docker login --username AWS --password-stdin 268558157000.dkr.ecr.us-east-1.amazonaws.com -fi - -aws ecr get-login-password --region "eu-west-1" | docker login --username AWS --password-stdin 268558157000.dkr.ecr.eu-west-1.amazonaws.com - -if [[ -n "${COMMUNITY_PRIVATE_PREVIEW_PULLSECRET_DOCKERCONFIGJSON:-}" ]]; then - # log in to quay.io for the mongodb/mongodb-search-community private repo - # TODO remove once we switch to the official repo in Public Preview - quay_io_auth_file=$(mktemp) - docker_configjson_tmp=$(mktemp) - echo "${COMMUNITY_PRIVATE_PREVIEW_PULLSECRET_DOCKERCONFIGJSON}" | base64 -d > "${quay_io_auth_file}" - jq -s '.[0] * .[1]' "${quay_io_auth_file}" ~/.docker/config.json > "${docker_configjson_tmp}" - mv "${docker_configjson_tmp}" ~/.docker/config.json - rm "${quay_io_auth_file}" -fi - -create_image_registries_secret diff --git a/scripts/dev/contexts/e2e_smoke_arm b/scripts/dev/contexts/e2e_smoke_arm new file mode 100644 index 000000000..64568ed5b --- /dev/null +++ b/scripts/dev/contexts/e2e_smoke_arm @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +set -Eeou pipefail + +script_name=$(readlink -f "${BASH_SOURCE[0]}") +script_dir=$(dirname "${script_name}") + +source "${script_dir}/root-context" + +export ops_manager_version="cloud_qa" + +# This is required to be able to rebuild the om image and use that image which has been rebuild +export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev +CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${script_dir}"/../../../.evergreen.yml | awk '{print $3}') +export CUSTOM_OM_VERSION + +export CUSTOM_MDB_VERSION=6.0.5-ent +export 
CUSTOM_MDB_PREV_VERSION=5.0.7-ent +export KUBE_ENVIRONMENT_NAME=kind +export CLUSTER_TYPE=kind + + +# TODO: change once we have image building +export BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa" +export REGISTRY="${BASE_REPO_URL}" +export INIT_DATABASE_IMAGE_REPOSITORY="${BASE_REPO_URL}/mongodb-kubernetes-init-database" +export OPERATOR_REGISTRY=${BASE_REPO_URL} +export DATABASE_REGISTRY=${BASE_REPO_URL} +export INIT_DATABASE_REGISTRY=${BASE_REPO_URL} + diff --git a/scripts/dev/contexts/e2e_smoke_ibm_power b/scripts/dev/contexts/e2e_smoke_ibm_power new file mode 100644 index 000000000..2b1000cb6 --- /dev/null +++ b/scripts/dev/contexts/e2e_smoke_ibm_power @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +set -Eeou pipefail + +script_name=$(readlink -f "${BASH_SOURCE[0]}") +script_dir=$(dirname "${script_name}") + +source "${script_dir}/root-context" + +export ops_manager_version="cloud_qa" + +# This is required to be able to rebuild the om image and use that image which has been rebuild +export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev +CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${script_dir}"/../../../.evergreen.yml | awk '{print $3}') +export CUSTOM_OM_VERSION + +export CUSTOM_MDB_VERSION=6.0.5-ent +export CUSTOM_MDB_PREV_VERSION=5.0.7-ent +export KUBE_ENVIRONMENT_NAME=minikube +export CLUSTER_TYPE=minikube +export CONTAINER_RUNTIME=podman + +# TODO: change once we have image building +export BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa" +export REGISTRY="${BASE_REPO_URL}" +export INIT_DATABASE_IMAGE_REPOSITORY="${BASE_REPO_URL}/mongodb-kubernetes-init-database" +export OPERATOR_REGISTRY=${BASE_REPO_URL} +export DATABASE_REGISTRY=${BASE_REPO_URL} +export INIT_DATABASE_REGISTRY=${BASE_REPO_URL} + diff --git a/scripts/dev/contexts/e2e_smoke_ibm_z b/scripts/dev/contexts/e2e_smoke_ibm_z new file mode 100644 index 000000000..2b1000cb6 --- /dev/null +++ 
b/scripts/dev/contexts/e2e_smoke_ibm_z @@ -0,0 +1,30 @@ +#!/usr/bin/env bash + +set -Eeou pipefail + +script_name=$(readlink -f "${BASH_SOURCE[0]}") +script_dir=$(dirname "${script_name}") + +source "${script_dir}/root-context" + +export ops_manager_version="cloud_qa" + +# This is required to be able to rebuild the om image and use that image which has been rebuild +export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev +CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${script_dir}"/../../../.evergreen.yml | awk '{print $3}') +export CUSTOM_OM_VERSION + +export CUSTOM_MDB_VERSION=6.0.5-ent +export CUSTOM_MDB_PREV_VERSION=5.0.7-ent +export KUBE_ENVIRONMENT_NAME=minikube +export CLUSTER_TYPE=minikube +export CONTAINER_RUNTIME=podman + +# TODO: change once we have image building +export BASE_REPO_URL="268558157000.dkr.ecr.us-east-1.amazonaws.com/lucian.tosa" +export REGISTRY="${BASE_REPO_URL}" +export INIT_DATABASE_IMAGE_REPOSITORY="${BASE_REPO_URL}/mongodb-kubernetes-init-database" +export OPERATOR_REGISTRY=${BASE_REPO_URL} +export DATABASE_REGISTRY=${BASE_REPO_URL} +export INIT_DATABASE_REGISTRY=${BASE_REPO_URL} + diff --git a/scripts/dev/contexts/init_test_run_ibm b/scripts/dev/contexts/init_test_run_ibm new file mode 100644 index 000000000..7f7f635e9 --- /dev/null +++ b/scripts/dev/contexts/init_test_run_ibm @@ -0,0 +1,8 @@ +#!/usr/bin/env bash + +set -Eeou pipefail + +script_name=$(readlink -f "${BASH_SOURCE[0]}") +script_dir=$(dirname "${script_name}") + +source "${script_dir}/root-context" diff --git a/scripts/dev/install_csi_driver.sh b/scripts/dev/install_csi_driver.sh index 85d365535..d9b3fccc5 100755 --- a/scripts/dev/install_csi_driver.sh +++ b/scripts/dev/install_csi_driver.sh @@ -2,6 +2,7 @@ set -Eeou pipefail +source scripts/dev/set_env_context.sh source scripts/funcs/kubernetes # Path to the deploy script diff --git a/scripts/dev/prepare_local_e2e_run.sh b/scripts/dev/prepare_local_e2e_run.sh index 
6bac19df1..e6b1b9bcd 100755 --- a/scripts/dev/prepare_local_e2e_run.sh +++ b/scripts/dev/prepare_local_e2e_run.sh @@ -49,7 +49,7 @@ ensure_namespace "${NAMESPACE}" 2>&1 | prepend "ensure_namespace" echo "Deleting ~/.docker/.config.json and re-creating it" rm ~/.docker/config.json || true -scripts/dev/configure_docker_auth.sh 2>&1 | prepend "configure_docker_auth" +scripts/dev/configure_container_auth.sh 2>&1 | prepend "configure_docker_auth" echo "Configuring operator" scripts/evergreen/e2e/configure_operator.sh 2>&1 | prepend "configure_operator" @@ -60,6 +60,11 @@ prepare_operator_config_map "$(kubectl config current-context)" 2>&1 | prepend " rm -rf docker/mongodb-kubernetes-tests/helm_chart cp -rf helm_chart docker/mongodb-kubernetes-tests/helm_chart +rm -rf docker/mongodb-kubernetes-tests/public +cp -rf public docker/mongodb-kubernetes-tests/public +cp release.json docker/mongodb-kubernetes-tests/release.json +cp requirements.txt docker/mongodb-kubernetes-tests/requirements.txt + # shellcheck disable=SC2154 if [[ "${KUBE_ENVIRONMENT_NAME}" == "multi" ]]; then prepare_multi_cluster_e2e_run 2>&1 | prepend "prepare_multi_cluster_e2e_run" diff --git a/scripts/dev/recreate_python_venv.sh b/scripts/dev/recreate_python_venv.sh index fb1f9ab8f..6594b3fb2 100755 --- a/scripts/dev/recreate_python_venv.sh +++ b/scripts/dev/recreate_python_venv.sh @@ -6,24 +6,109 @@ set -Eeou pipefail source scripts/dev/set_env_context.sh +install_pyenv() { + # Check if pyenv directory exists first + if [[ -d "${HOME}/.pyenv" ]]; then + echo "pyenv directory already exists, setting up environment..." >&2 + export PYENV_ROOT="${HOME}/.pyenv" + export PATH="${PYENV_ROOT}/bin:${PATH}" + + # Initialize pyenv in current shell + if command -v pyenv &> /dev/null; then + eval "$(pyenv init --path)" + eval "$(pyenv init -)" + echo "pyenv already installed and initialized" >&2 + return 0 + else + echo "pyenv directory exists but binary not working, reinstalling..." 
>&2 + rm -rf "${HOME}/.pyenv" + fi + fi + + # Check if pyenv command is available in PATH + if command -v pyenv &> /dev/null; then + echo "pyenv already available in PATH" >&2 + return 0 + fi + + echo "Installing pyenv..." >&2 + + # Install pyenv via the official installer + if curl -s -S -L https://raw.githubusercontent.com/pyenv/pyenv-installer/master/bin/pyenv-installer | bash; then + # Add pyenv to PATH for current session + export PYENV_ROOT="${HOME}/.pyenv" + export PATH="${PYENV_ROOT}/bin:${PATH}" + + # Initialize pyenv in current shell + if command -v pyenv &> /dev/null; then + eval "$(pyenv init --path)" + eval "$(pyenv init -)" + fi + + echo "pyenv installed successfully" >&2 + return 0 + else + echo "Failed to install pyenv" >&2 + return 1 + fi +} + +ensure_required_python() { + local required_version="${PYTHON_VERSION:-3.13}" + local major_minor + major_minor=$(echo "${required_version}" | grep -oE '^[0-9]+\.[0-9]+') + + echo "Setting up Python ${required_version} (${major_minor}.x)..." >&2 + + # Always install pyenv first + if ! install_pyenv; then + echo "Error: Failed to install pyenv" >&2 + return 1 + fi + + # Install latest version in the required series + local latest_version + latest_version=$(pyenv install --list | grep -E "^[[:space:]]*${major_minor}\.[0-9]+$" | tail -1 | xargs) + if [[ -n "${latest_version}" ]]; then + echo "Installing Python ${latest_version} via pyenv..." >&2 + # Use --skip-existing to avoid errors if version already exists + if pyenv install --skip-existing "${latest_version}"; then + pyenv global "${latest_version}" + # Install python3-venv package for Debian/Ubuntu systems if needed + if command -v apt-get &> /dev/null; then + echo "Installing python3-venv package for venv support..." 
>&2 + sudo apt-get update -qq && sudo apt-get install -y python3-venv || true + fi + return 0 + fi + fi + + echo "Error: Unable to install Python ${major_minor} via pyenv" >&2 + return 1 +} + if [[ -d "${PROJECT_DIR}"/venv ]]; then echo "Removing venv..." cd "${PROJECT_DIR}" rm -rf "venv" fi -# in our EVG hosts, python versions are always in /opt/python -python_bin="/opt/python/${PYTHON_VERSION}/bin/python3" -if [[ "$(uname)" == "Darwin" ]]; then - python_bin="python${PYTHON_VERSION}" -fi - -echo "Using python from the following path: ${python_bin}" +# Ensure required Python version is available +ensure_required_python -"${python_bin}" -m venv venv +python3 -m venv venv source venv/bin/activate pip install --upgrade pip -pip install -r requirements.txt + +skip_requirements="${SKIP_INSTALL_REQUIREMENTS:-false}" +if [[ "${skip_requirements}" != "true" ]]; then + echo "Installing requirements.txt..." + pip install -r requirements.txt +else + echo "Skipping requirements.txt installation." + pip install requests +fi + echo "Python venv was recreated successfully." 
echo "Current python path: $(which python)" python --version diff --git a/scripts/dev/reset_helm.sh b/scripts/dev/reset_helm.sh index 080cbffb1..15280c56f 100755 --- a/scripts/dev/reset_helm.sh +++ b/scripts/dev/reset_helm.sh @@ -1,5 +1,7 @@ #!/bin/bash +source scripts/dev/set_env_context.sh + # Set Helm release name HELM_RELEASE="mongodb-kubernetes-operator" diff --git a/scripts/dev/switch_context.sh b/scripts/dev/switch_context.sh index 5d07e8fff..e5b25c4e4 100755 --- a/scripts/dev/switch_context.sh +++ b/scripts/dev/switch_context.sh @@ -99,17 +99,23 @@ echo "Generated env files in $(readlink -f "${destination_envs_dir}"):" # shellcheck disable=SC2010 ls -l1 "${destination_envs_dir}" | grep "context" -if which kubectl > /dev/null; then +# Prefer kubectl from bin directory if it exists, otherwise use system kubectl +KUBECTL_CMD="kubectl" +if [[ -n "${PROJECT_DIR:-}" && -x "${PROJECT_DIR}/bin/kubectl" ]]; then + KUBECTL_CMD="${PROJECT_DIR}/bin/kubectl" +fi + +if [[ "$KUBECTL_CMD" != "kubectl" ]] || which kubectl > /dev/null; then if [ "${CLUSTER_NAME-}" ]; then # The convention: the cluster name must match the name of kubectl context # We expect this not to be true if kubernetes cluster is still to be created (minikube/kops) - if ! kubectl config use-context "${CLUSTER_NAME}"; then + if ! "$KUBECTL_CMD" config use-context "${CLUSTER_NAME}"; then echo "Warning: failed to switch kubectl context to: ${CLUSTER_NAME}" echo "Does a matching Kubernetes context exist?" 
fi # Setting the default namespace for current context - kubectl config set-context "$(kubectl config current-context)" "--namespace=${NAMESPACE}" &>/dev/null || true + "$KUBECTL_CMD" config set-context "$("$KUBECTL_CMD" config current-context)" "--namespace=${NAMESPACE}" &>/dev/null || true # shellcheck disable=SC2153 echo "Current context: ${context} (kubectl context: ${CLUSTER_NAME}), namespace=${NAMESPACE}" diff --git a/scripts/evergreen/build_multi_cluster_kubeconfig_creator.sh b/scripts/evergreen/build_multi_cluster_kubeconfig_creator.sh index f8bbf1890..63729d024 100755 --- a/scripts/evergreen/build_multi_cluster_kubeconfig_creator.sh +++ b/scripts/evergreen/build_multi_cluster_kubeconfig_creator.sh @@ -16,14 +16,21 @@ echo "Building multi cluster kube config creation tool." project_dir="$(pwd)" pushd cmd/kubectl-mongodb -GOOS="${OS}" GOARCH="${ARCH}" CGO_ENABLED=0 go build -buildvcs=false -o "${project_dir}/docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator" main.go -GOOS="linux" GOARCH="amd64" CGO_ENABLED=0 go build -buildvcs=false -o "${project_dir}/docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_linux" main.go +GOOS="${OS}" GOARCH="${ARCH}" CGO_ENABLED=0 go build -buildvcs=false -o "${project_dir}/docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator" main.go & + +GOOS="linux" GOARCH="amd64" CGO_ENABLED=0 go build -buildvcs=false -o "${project_dir}/docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_amd64" main.go & +GOOS="linux" GOARCH="s390x" CGO_ENABLED=0 go build -buildvcs=false -o "${project_dir}/docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_s390x" main.go & +GOOS="linux" GOARCH="ppc64le" CGO_ENABLED=0 go build -buildvcs=false -o "${project_dir}/docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_ppc64le" main.go & +GOOS="linux" GOARCH="arm64" CGO_ENABLED=0 go build -buildvcs=false -o 
"${project_dir}/docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_arm64" main.go & +wait popd chmod +x docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator -# this one is used for the dockerfile to build the test image running on linux, this script might create 2 times -# the same binary, but on the average case it creates one for linux and one for darwin-arm -chmod +x docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_linux +# these are used in the dockerfile +chmod +x docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_amd64 +chmod +x docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_s390x +chmod +x docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_ppc64le +chmod +x docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator_arm64 mkdir -p bin || true cp docker/mongodb-kubernetes-tests/multi-cluster-kube-config-creator bin/kubectl-mongodb || true diff --git a/scripts/evergreen/deployments/test-app/templates/mongodb-enterprise-tests.yaml b/scripts/evergreen/deployments/test-app/templates/mongodb-enterprise-tests.yaml index 67661b29b..0b6e72b1b 100644 --- a/scripts/evergreen/deployments/test-app/templates/mongodb-enterprise-tests.yaml +++ b/scripts/evergreen/deployments/test-app/templates/mongodb-enterprise-tests.yaml @@ -53,9 +53,9 @@ spec: secretName: test-pod-multi-cluster-config {{ end }} containers: - - image: busybox + - image: public.ecr.aws/docker/library/busybox:1.37.0 name: keepalive - command: [ "/bin/sh", "-c", "sleep inf" ] + command: ["/bin/sh", "-c", "while true; do sleep 3600; done"] volumeMounts: - name: results mountPath: /tmp/results diff --git a/scripts/evergreen/e2e/build_e2e_image_ibm.sh b/scripts/evergreen/e2e/build_e2e_image_ibm.sh new file mode 100755 index 000000000..66aad76c3 --- /dev/null +++ b/scripts/evergreen/e2e/build_e2e_image_ibm.sh @@ -0,0 +1,14 @@ +#!/usr/bin/env bash +source scripts/dev/set_env_context.sh + +cp -rf public 
docker/mongodb-kubernetes-tests/public +cp release.json docker/mongodb-kubernetes-tests/release.json +cp requirements.txt docker/mongodb-kubernetes-tests/requirements.txt +cp -rf helm_chart docker/mongodb-kubernetes-tests/helm_chart + +echo "Building mongodb-kubernetes-tests image with tag: ${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}" +cd docker/mongodb-kubernetes-tests +sudo podman buildx build --progress plain . -f Dockerfile -t "${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}-$(arch)" --build-arg PYTHON_VERSION="${PYTHON_VERSION}" +sudo podman push --authfile="/root/.config/containers/auth.json" "${BASE_REPO_URL}/mongodb-kubernetes-tests:${version_id}-$(arch)" + +# docker buildx imagetools create "${BASE_REPO_URL}mongodb-kubernetes-tests:${version_id}" --append "${BASE_REPO_URL}mongodb-kubernetes-tests:${version_id}-$(arch)" -t "${BASE_REPO_URL}mongodb-kubernetes-tests:${version_id}" diff --git a/scripts/evergreen/e2e/single_e2e.sh b/scripts/evergreen/e2e/single_e2e.sh index dbb1306da..f14cf9ab9 100755 --- a/scripts/evergreen/e2e/single_e2e.sh +++ b/scripts/evergreen/e2e/single_e2e.sh @@ -27,6 +27,21 @@ deploy_test_app() { tag="${OVERRIDE_VERSION_ID}" fi + local arch + arch=$(uname -m) + + case "${arch}" in + ppc64le) + tag="${tag}-ppc64le" + ;; + s390x) + tag="${tag}-s390x" + ;; + *) + echo "Not IBM host, using default tag" + ;; + esac + IS_PATCH="${IS_PATCH:-default_patch}" TASK_NAME="${TASK_NAME:-default_task}" EXECUTION="${EXECUTION:-default_execution}" @@ -213,8 +228,8 @@ run_tests() { echo # We need to make sure to access this file after the test has finished - kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/results/myreport.xml logs/myreport.xml - kubectl --context "${test_pod_context}" -n "${NAMESPACE}" cp "${TEST_APP_PODNAME}":/tmp/diagnostics logs + kubectl --context "${test_pod_context}" -n "${NAMESPACE}" -c keepalive cp "${TEST_APP_PODNAME}":/tmp/results/myreport.xml logs/myreport.xml + 
kubectl --context "${test_pod_context}" -n "${NAMESPACE}" -c keepalive cp "${TEST_APP_PODNAME}":/tmp/diagnostics logs status="$(kubectl --context "${test_pod_context}" get pod "${TEST_APP_PODNAME}" -n "${NAMESPACE}" -o jsonpath="{ .status }" | jq -r '.containerStatuses[] | select(.name == "mongodb-enterprise-operator-tests")'.state.terminated.reason)" [[ "${status}" == "Completed" ]] diff --git a/scripts/evergreen/setup_aws.sh b/scripts/evergreen/setup_aws.sh index 931eb0a36..4900de4fd 100755 --- a/scripts/evergreen/setup_aws.sh +++ b/scripts/evergreen/setup_aws.sh @@ -2,17 +2,95 @@ set -Eeou pipefail source scripts/dev/set_env_context.sh +source scripts/funcs/install -INSTALL_DIR="${workdir:?}/.local/lib/aws" -BIN_LOCATION="${workdir}/bin" +# Install AWS CLI v2 via binary download (for amd64 and arm64) +install_aws_cli_binary() { + local arch="$1" + echo "Installing AWS CLI v2 via binary download for ${arch}..." -mkdir -p "${BIN_LOCATION}" + # Map architecture names for AWS CLI download URLs + local aws_arch + aws_arch=$(uname -m) -tmpdir=$(mktemp -d) -cd "${tmpdir}" + # Download and install AWS CLI v2 + local temp_dir + temp_dir=$(mktemp -d) + cd "${temp_dir}" -curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip" -unzip awscliv2.zip &> /dev/null + echo "Downloading AWS CLI v2 for ${aws_arch}..." + curl -s "https://awscli.amazonaws.com/awscli-exe-linux-${aws_arch}.zip" -o "awscliv2.zip" + + unzip -q awscliv2.zip + sudo ./aws/install --update + + # Clean up + cd - > /dev/null + rm -rf "${temp_dir}" + + # Verify installation + if command -v aws &> /dev/null; then + echo "AWS CLI v2 installed successfully:" + aws --version + else + echo "Error: AWS CLI v2 installation failed" >&2 + return 1 + fi +} + +# Install AWS CLI v1 via pip (for IBM architectures: ppc64le, s390x) +install_aws_cli_pip() { + echo "Installing AWS CLI v1 via pip (for IBM architectures)..." + + # Ensure pip is available + if ! command -v pip3 &> /dev/null && ! 
command -v pip &> /dev/null; then + echo "Error: pip is not available. Please install Python and pip first." >&2 + return 1 + fi + + # Use pip3 if available, otherwise pip + local pip_cmd="pip3" + if ! command -v pip3 &> /dev/null; then + pip_cmd="pip" + fi + + echo "Installing AWS CLI using ${pip_cmd}..." + ${pip_cmd} install --user awscli + + # Add ~/.local/bin to PATH if not already there (where pip --user installs) + if [[ ":$PATH:" != *":$HOME/.local/bin:"* ]]; then + export PATH="$HOME/.local/bin:$PATH" + echo "Added ~/.local/bin to PATH" + fi + + # Verify installation + if command -v aws &> /dev/null; then + echo "AWS CLI v1 installed successfully:" + aws --version + else + echo "Error: AWS CLI v1 installation failed or not found in PATH" >&2 + return 1 + fi +} + +# Main installation logic +install_aws_cli() { + local arch + arch=$(detect_architecture) + + case "${arch}" in + amd64|arm64) + echo "Standard architecture detected (${arch}). Using binary installation..." + install_aws_cli_binary "${arch}" + ;; + *) + echo "Warning: Unknown architecture ${arch}. Falling back to pip installation..." + install_aws_cli_pip + ;; + esac +} + +install_aws_cli docker_dir="/home/${USER}/.docker" if [[ ! -d "${docker_dir}" ]]; then @@ -21,7 +99,5 @@ fi sudo chown "${USER}":"${USER}" "${docker_dir}" -R sudo chmod g+rwx "${docker_dir}" -R -sudo ./aws/install --bin-dir "${BIN_LOCATION}" --install-dir "${INSTALL_DIR}" --update -cd - -rm -rf "${tmpdir}" +echo "AWS CLI setup completed successfully." 
diff --git a/scripts/evergreen/setup_jq.sh b/scripts/evergreen/setup_jq.sh index e21d4a07e..5aaa2f3f6 100755 --- a/scripts/evergreen/setup_jq.sh +++ b/scripts/evergreen/setup_jq.sh @@ -7,7 +7,9 @@ set -Eeou pipefail -source scripts/dev/set_env_context.sh source scripts/funcs/install -download_and_install_binary "${PROJECT_DIR:-.}/bin" jq "https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64" +jq_arch=$(detect_architecture "jq") +echo "Detected architecture: ${jq_arch}" + +download_and_install_binary "${PROJECT_DIR:-${workdir}}/bin" jq "https://github.com/stedolan/jq/releases/download/jq-1.8.1/jq-linux-${jq_arch}" diff --git a/scripts/evergreen/setup_kind.sh b/scripts/evergreen/setup_kind.sh index 96b315a78..3df0aa620 100755 --- a/scripts/evergreen/setup_kind.sh +++ b/scripts/evergreen/setup_kind.sh @@ -2,15 +2,18 @@ set -Eeou pipefail source scripts/dev/set_env_context.sh +source scripts/funcs/install # Store the lowercase name of Operating System os=$(uname | tr '[:upper:]' '[:lower:]') +# Detect architecture +arch_suffix=$(detect_architecture) # This should be changed when needed latest_version="v0.27.0" mkdir -p "${PROJECT_DIR}/bin/" echo "Saving kind to ${PROJECT_DIR}/bin" -curl --retry 3 --silent -L "https://github.com/kubernetes-sigs/kind/releases/download/${latest_version}/kind-${os}-amd64" -o kind +curl --retry 3 --silent -L "https://github.com/kubernetes-sigs/kind/releases/download/${latest_version}/kind-${os}-${arch_suffix}" -o kind chmod +x kind sudo mv kind "${PROJECT_DIR}/bin" diff --git a/scripts/evergreen/setup_kubectl.sh b/scripts/evergreen/setup_kubectl.sh index ab9066ac1..46e86279f 100755 --- a/scripts/evergreen/setup_kubectl.sh +++ b/scripts/evergreen/setup_kubectl.sh @@ -2,23 +2,28 @@ set -Eeou pipefail source scripts/dev/set_env_context.sh +source scripts/funcs/install + +# Detect the current architecture +ARCH=$(detect_architecture) +echo "Detected architecture: ${ARCH}" bindir="${PROJECT_DIR}/bin" tmpdir="${PROJECT_DIR}/tmp" 
mkdir -p "${bindir}" "${tmpdir}" -echo "Downloading latest kubectl" -curl -s --retry 3 -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl" +echo "Downloading latest kubectl for ${ARCH}" +curl -s --retry 3 -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/${ARCH}/kubectl" chmod +x kubectl echo "kubectl version --client" ./kubectl version --client mv kubectl "${bindir}" -echo "Downloading helm" +echo "Downloading helm for ${ARCH}" helm_archive="${tmpdir}/helm.tgz" helm_version="v3.17.1" -curl -s https://get.helm.sh/helm-${helm_version}-linux-amd64.tar.gz --output "${helm_archive}" +curl -s https://get.helm.sh/helm-${helm_version}-linux-${ARCH}.tar.gz --output "${helm_archive}" tar xfz "${helm_archive}" -C "${tmpdir}" &> /dev/null -mv "${tmpdir}/linux-amd64/helm" "${bindir}" +mv "${tmpdir}/linux-${ARCH}/helm" "${bindir}" "${bindir}"/helm version diff --git a/scripts/evergreen/setup_kubernetes_environment.sh b/scripts/evergreen/setup_kubernetes_environment.sh index 707231c9f..fb6e2ee00 100755 --- a/scripts/evergreen/setup_kubernetes_environment.sh +++ b/scripts/evergreen/setup_kubernetes_environment.sh @@ -7,7 +7,7 @@ source scripts/funcs/kubernetes # shellcheck disable=SC2154 bindir="${PROJECT_DIR}/bin" -if [[ "${KUBE_ENVIRONMENT_NAME}" == "vanilla" || ("${KUBE_ENVIRONMENT_NAME}" == "multi" && "${CLUSTER_TYPE}" == "kops") ]]; then +if [[ "${KUBE_ENVIRONMENT_NAME}" == "vanilla" || ("${KUBE_ENVIRONMENT_NAME}" == "multi" && "${CLUSTER_TYPE}" == "minikube") ]]; then export AWS_ACCESS_KEY_ID="${mms_eng_test_aws_access_key:?}" export AWS_SECRET_ACCESS_KEY="${mms_eng_test_aws_secret:?}" export AWS_DEFAULT_REGION="${mms_eng_test_aws_region:?}" @@ -30,6 +30,8 @@ elif [ "${KUBE_ENVIRONMENT_NAME}" = "kind" ] || [ "${KUBE_ENVIRONMENT_NAME}" = " scripts/dev/recreate_kind_cluster.sh "kind" elif [[ "${KUBE_ENVIRONMENT_NAME}" = "multi" && "${CLUSTER_TYPE}" == "kind" ]]; then 
scripts/dev/recreate_kind_clusters.sh +elif [[ "${KUBE_ENVIRONMENT_NAME}" = "minikube" ]]; then + echo "Nothing to do for minikube" else echo "KUBE_ENVIRONMENT_NAME not recognized" echo "value is <<${KUBE_ENVIRONMENT_NAME}>>. If empty it means it was not set" diff --git a/scripts/evergreen/setup_minikube_host.sh b/scripts/evergreen/setup_minikube_host.sh new file mode 100755 index 000000000..1c7d6d3a8 --- /dev/null +++ b/scripts/evergreen/setup_minikube_host.sh @@ -0,0 +1,76 @@ +#!/usr/bin/env bash + +# Consolidated setup script for minikube host with multi-architecture support +# This script groups all the setup steps needed for IBM machines and other architectures +# Can be run on static hosts for testing and verification + +source scripts/dev/set_env_context.sh +source scripts/funcs/install +set -Eeoux pipefail + +echo "==========================================" +echo "Setting up minikube host with multi-architecture support" +echo "Architecture: $(detect_architecture)" +echo "OS: $(uname -s)" +echo "==========================================" + +# Function to run a setup step with error handling and logging +run_setup_step() { + local step_name="$1" + shift + local script_command=("$@") + + echo "" + echo ">>> Running: ${step_name}" + echo ">>> Command: ${script_command[*]}" + + local script_path="${script_command[0]}" + if [[ -f "${script_path}" ]]; then + if "${script_command[@]}"; then + echo "✅ ${step_name} completed successfully" + else + echo "❌ ${step_name} failed" + exit 1 + fi + else + echo "❌ Script not found: ${script_path}" + exit 1 + fi +} + +# Setup Python environment (needed for AWS CLI pip installation) +export GRPC_PYTHON_BUILD_SYSTEM_OPENSSL=1 +export SKIP_INSTALL_REQUIREMENTS=true +run_setup_step "Python Virtual Environment" "scripts/dev/recreate_python_venv.sh" + +run_setup_step "AWS CLI Setup" "scripts/evergreen/setup_aws.sh" + +run_setup_step "kubectl and helm Setup" "scripts/evergreen/setup_kubectl.sh" + +run_setup_step "jq Setup" 
"scripts/evergreen/setup_jq.sh" + +run_setup_step "Minikube Host Setup with Container Runtime Detection" "scripts/minikube/setup_minikube_host.sh" + +export CONTAINER_RUNTIME=podman +run_setup_step "Container Registry Authentication" "scripts/dev/configure_container_auth.sh" + +# The minikube cluster is already started by the setup_minikube_host.sh script +echo "" +echo ">>> Minikube cluster startup completed by setup_minikube_host.sh" +echo "✅ Minikube cluster is ready for use" + +echo "" +echo "==========================================" +echo "✅ Minikube host setup completed successfully!" +echo "==========================================" +echo "" +echo "Installed tools summary:" +echo "- Python: $(python --version 2>/dev/null || python3 --version 2>/dev/null || echo 'Not found')" +echo "- AWS CLI: $(aws --version 2>/dev/null || echo 'Not found')" +echo "- kubectl: $(kubectl version --client 2>/dev/null || echo 'Not found')" +echo "- helm: $(helm version --short 2>/dev/null || echo 'Not found')" +echo "- jq: $(jq --version 2>/dev/null || echo 'Not found')" +echo "- Container Runtime: $(command -v podman &>/dev/null && echo "Podman $(podman --version 2>/dev/null)" || command -v docker &>/dev/null && echo "Docker $(docker --version 2>/dev/null)" || echo "Not found")" +echo "- Minikube: $(./bin/minikube version --short 2>/dev/null || echo 'Not found')" +echo "" +echo "Setup complete! Host is ready for minikube operations." 
diff --git a/scripts/evergreen/teardown_kubernetes_environment.sh b/scripts/evergreen/teardown_kubernetes_environment.sh index e5e2bd869..a9babfbed 100755 --- a/scripts/evergreen/teardown_kubernetes_environment.sh +++ b/scripts/evergreen/teardown_kubernetes_environment.sh @@ -5,6 +5,12 @@ set -Eeou pipefail source scripts/dev/set_env_context.sh if [ "${KUBE_ENVIRONMENT_NAME}" = "kind" ]; then + docker system prune -a -f echo "Deleting Kind cluster" kind delete clusters --all fi + +if [ "${KUBE_ENVIRONMENT_NAME}" = "minikube" ]; then + echo "Deleting minikube cluster" + "${PROJECT_DIR:-.}/bin/minikube" delete +fi diff --git a/scripts/funcs/install b/scripts/funcs/install index fee7fc657..978ed5275 100644 --- a/scripts/funcs/install +++ b/scripts/funcs/install @@ -2,6 +2,41 @@ set -euo pipefail +# Supported target formats: +# - "standard" (default): x86_64→amd64, aarch64|arm64→arm64, ppc64le→ppc64le, s390x→s390x +# - "jq": same as standard but ppc64le→ppc64el (jq's naming convention) +# +detect_architecture() { + local target_format="${1:-standard}" + local arch + arch=$(uname -m) + # Use standard mapping for most tools, with special case for jq's ppc64le naming + local ppc64_suffix="ppc64le" + if [[ "${target_format}" == "jq" ]]; then + ppc64_suffix="ppc64el" # jq uses ppc64el instead of ppc64le + fi + + case "${arch}" in + x86_64) + echo "amd64" + ;; + aarch64|arm64) + echo "arm64" + ;; + ppc64le) + echo "${ppc64_suffix}" + ;; + s390x) + echo "s390x" + ;; + *) + echo "Error: Unsupported architecture: ${arch}" >&2 + echo "Supported architectures: x86_64 (amd64), aarch64 (arm64), ppc64le, s390x" >&2 + return 1 + ;; + esac +} + # Downloads a binary from and moves it into directory. # Example usage: download_and_install_binary ${workdir}/bin jq "https://..." 
download_and_install_binary() { diff --git a/scripts/funcs/kubernetes b/scripts/funcs/kubernetes index 5377d8927..11250422d 100644 --- a/scripts/funcs/kubernetes +++ b/scripts/funcs/kubernetes @@ -98,15 +98,40 @@ create_image_registries_secret() { context=$1 namespace=$2 secret_name=$3 + + # Detect the correct config file path based on container runtime + local config_file + local temp_config_file="" + if command -v podman &> /dev/null && (podman info &> /dev/null || sudo podman info &> /dev/null); then + # For Podman, use root's auth.json since minikube uses sudo podman + config_file="/root/.config/containers/auth.json" + echo "Using Podman config: ${config_file}" + + # Create a temporary copy that the current user can read + temp_config_file=$(mktemp) + sudo cp "${config_file}" "${temp_config_file}" + sudo chown "$(whoami):$(whoami)" "${temp_config_file}" + config_file="${temp_config_file}" + else + # For Docker, use standard docker config + config_file="${HOME}/.docker/config.json" + echo "Using Docker config: ${config_file}" + fi + # shellcheck disable=SC2154 if kubectl --context "${context}" get namespace "${namespace}"; then kubectl --context "${context}" -n "${namespace}" delete secret "${secret_name}" --ignore-not-found echo "${context}: Creating ${namespace}/${secret_name} pull secret" kubectl --context "${context}" -n "${namespace}" create secret generic "${secret_name}" \ - --from-file=.dockerconfigjson="${HOME}/.docker/config.json" --type=kubernetes.io/dockerconfigjson + --from-file=.dockerconfigjson="${config_file}" --type=kubernetes.io/dockerconfigjson else echo "Skipping creating pull secret in ${context}/${namespace}. The namespace doesn't exist yet." 
fi + + # Clean up temporary file + if [[ -n "${temp_config_file}" ]] && [[ -f "${temp_config_file}" ]]; then + rm -f "${temp_config_file}" + fi } echo "Creating/updating pull secret from docker configured file" diff --git a/scripts/minikube/install-minikube.sh b/scripts/minikube/install-minikube.sh new file mode 100755 index 000000000..cacfd7618 --- /dev/null +++ b/scripts/minikube/install-minikube.sh @@ -0,0 +1,56 @@ +#!/usr/bin/env bash +set -Eeou pipefail + +source scripts/dev/set_env_context.sh +source scripts/funcs/install + +# Detect architecture +ARCH=$(detect_architecture) + +echo "Installing minikube on ${ARCH} architecture..." + +# Install crictl (container runtime CLI) +echo "Installing crictl for ${ARCH}..." +CRICTL_VERSION=$(curl -s https://api.github.com/repos/kubernetes-sigs/cri-tools/releases/latest | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/') + +# Download and extract crictl tar.gz +mkdir -p "${PROJECT_DIR:-.}/bin" +CRICTL_URL="https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-${ARCH}.tar.gz" +echo "Downloading ${CRICTL_URL}" +TEMP_DIR=$(mktemp -d) +curl --retry 3 --silent -L "${CRICTL_URL}" -o "${TEMP_DIR}/crictl.tar.gz" +tar -xzf "${TEMP_DIR}/crictl.tar.gz" -C "${TEMP_DIR}/" +chmod +x "${TEMP_DIR}/crictl" +mv "${TEMP_DIR}/crictl" "${PROJECT_DIR:-.}/bin/crictl" +rm -rf "${TEMP_DIR}" +echo "Installed crictl to ${PROJECT_DIR:-.}/bin" + +# Also install crictl system-wide so minikube can find it +echo "Installing crictl system-wide..." 
+if [[ -f "${PROJECT_DIR:-.}/bin/crictl" ]]; then + # Install to both /usr/local/bin and /usr/bin for better PATH coverage + sudo cp "${PROJECT_DIR:-.}/bin/crictl" /usr/local/bin/crictl + sudo cp "${PROJECT_DIR:-.}/bin/crictl" /usr/bin/crictl + sudo chmod +x /usr/local/bin/crictl + sudo chmod +x /usr/bin/crictl + echo "✅ crictl installed to /usr/local/bin/ and /usr/bin/" + + # Verify installation + if command -v crictl >/dev/null 2>&1; then + echo "✅ crictl is now available in PATH: $(which crictl)" + echo "✅ crictl version: $(crictl --version 2>/dev/null || echo 'version check failed')" + else + echo "⚠️ crictl installed but not found in PATH" + fi +else + echo "⚠️ crictl not found in project bin, minikube may have issues" +fi + +# Install minikube +echo "Installing minikube for ${ARCH}..." +MINIKUBE_VERSION=$(curl -s https://api.github.com/repos/kubernetes/minikube/releases/latest | grep '"tag_name":' | sed -E 's/.*"([^"]+)".*/\1/') + +# Download minikube for detected architecture +download_and_install_binary "${PROJECT_DIR:-.}/bin" minikube "https://github.com/kubernetes/minikube/releases/download/${MINIKUBE_VERSION}/minikube-linux-${ARCH}" + +echo "Crictl ${CRICTL_VERSION} and Minikube ${MINIKUBE_VERSION} installed successfully for ${ARCH}" diff --git a/scripts/minikube/minikube_host.sh b/scripts/minikube/minikube_host.sh new file mode 100755 index 000000000..971587ec3 --- /dev/null +++ b/scripts/minikube/minikube_host.sh @@ -0,0 +1,213 @@ +#!/usr/bin/env bash + +# This is a helper script for running tests on s390x Hosts. +# It allows to configure minikube clusters and expose remote API servers on a local machine to +# enable local development while running minikube cluster on s390x instance. +# Run "minikube_host.sh help" command to see the full usage. +# Similar to evg_host.sh but uses minikube instead of kind. 
+ +set -Eeou pipefail + +test "${MDB_BASH_DEBUG:-0}" -eq 1 && set -x + +source scripts/dev/set_env_context.sh +source scripts/funcs/printing +source scripts/funcs/install + +if [[ -z "${MINIKUBE_HOST_NAME}" ]]; then + echo "MINIKUBE_HOST_NAME env var is missing" + echo "Set it to your s390x host connection string (e.g., user@hostname)" + exit 1 +fi + +get_host_url() { + echo "${MINIKUBE_HOST_NAME}" +} + +cmd=${1-""} + +if [[ "${cmd}" != "" && "${cmd}" != "help" ]]; then + host_url=$(get_host_url) +fi + +kubeconfig_path="${HOME}/.operator-dev/minikube-host.kubeconfig" + +configure() { + ssh -T -q "${host_url}" "sudo chown \$(whoami):\$(whoami) ~/.docker || true; mkdir -p ~/.docker" + if [[ -f "${HOME}/.docker/config.json" ]]; then + echo "Copying local ~/.docker/config.json authorization credentials to s390x host" + jq '. | with_entries(select(.key == "auths"))' "${HOME}/.docker/config.json" | ssh -T -q "${host_url}" 'cat > ~/.docker/config.json' + fi + + sync + + ssh -T -q "${host_url}" "cd ~/mongodb-kubernetes; scripts/dev/switch_context.sh root-context; scripts/minikube/setup_minikube_host.sh " +} + +sync() { + rsync --verbose --archive --compress --human-readable --recursive --progress \ + --delete --delete-excluded --max-size=1000000 --prune-empty-dirs \ + -e ssh \ + --include-from=.rsyncinclude \ + --exclude-from=.gitignore \ + --exclude-from=.rsyncignore \ + ./ "${host_url}:~/mongodb-kubernetes/" + + rsync --verbose --no-links --recursive --prune-empty-dirs --archive --compress --human-readable \ + --max-size=1000000 \ + -e ssh \ + ~/.operator-dev/ \ + "${host_url}:~/.operator-dev" & + + wait +} + +remote-prepare-local-e2e-run() { + set -x + sync + cmd make switch context=e2e_mdb_kind_ubi_cloudqa + cmd scripts/dev/prepare_local_e2e_run.sh + rsync --verbose --no-links --recursive --prune-empty-dirs --archive --compress --human-readable \ + --max-size=1000000 \ + -e ssh \ + "${host_url}:~/mongodb-kubernetes/.multi_cluster_local_test_files" \ + ./ & + scp 
"${host_url}:~/.operator-dev/multicluster_kubeconfig" "${KUBE_CONFIG_PATH}" & + + wait +} + +get-kubeconfig() { + # For minikube, we need to get the kubeconfig and certificates + echo "Getting kubeconfig from minikube on s390x host..." + + # Create local minikube directory structure + mkdir -p "${HOME}/.minikube" + + # Copy certificates from remote host + echo "Copying minikube certificates..." + scp "${host_url}:~/.minikube/ca.crt" "${HOME}/.minikube/" + scp "${host_url}:~/.minikube/client.crt" "${HOME}/.minikube/" + scp "${host_url}:~/.minikube/client.key" "${HOME}/.minikube/" + + # Get kubeconfig and update paths to local ones + ssh -T -q "${host_url}" "cd ~/mongodb-kubernetes; export KUBE_ENVIRONMENT_NAME=minikube; kubectl config view --raw" > "${kubeconfig_path}" + + # Update certificate paths to local paths + sed -i '' "s|/home/cloud-user/.minikube|${HOME}/.minikube|g" "${kubeconfig_path}" + + # Update server addresses to use localhost for tunneling + sed -i '' "s|https://192.168.[0-9]*.[0-9]*:\([0-9]*\)|https://127.0.0.1:\1|g" "${kubeconfig_path}" + + echo "Copied minikube kubeconfig and certificates to ${kubeconfig_path}" +} + +recreate-minikube-cluster() { + configure "$(detect_architecture)" 2>&1| prepend "minikube_host.sh configure" + echo "Recreating minikube cluster on ${MINIKUBE_HOST_NAME} (${host_url})..." + # shellcheck disable=SC2088 + ssh -T "${host_url}" "cd ~/mongodb-kubernetes; export KUBE_ENVIRONMENT_NAME=minikube; minikube delete || true; ./scripts/minikube/setup_minikube_host.sh" + echo "Copying kubeconfig to ${kubeconfig_path}" + get-kubeconfig +} + +tunnel() { + shift 1 + echo "Setting up tunnel for minikube cluster..." + + # Get the minikube API server port from remote host + local api_port + api_port=$(ssh -T -q "${host_url}" "cd ~/mongodb-kubernetes; minikube ip 2>/dev/null && echo ':8443' | tr -d '\n'") + + if [[ -z "${api_port}" ]]; then + echo "Could not determine minikube API server details. Is the cluster running?" 
+ return 1 + fi + + # Extract just the port (8443) + local port="8443" + echo "Forwarding localhost:${port} to minikube cluster API server" + + # Forward the API server port through minikube + set -x + # shellcheck disable=SC2029 + ssh -L "${port}:$(ssh -T -q "${host_url}" "minikube ip"):${port}" "${host_url}" "$@" + set +x +} + +retry_with_sleep() { + shift 1 + cmd=$1 + local sleep_time + sleep_time=5 + + while true; do + ${cmd} || true + echo "Retrying command after ${sleep_time} of sleep: ${cmd}" + sleep 5; + done +} + +ssh_to_host() { + shift 1 + # shellcheck disable=SC2029 + ssh "$@" "${host_url}" +} + +upload-my-ssh-private-key() { + ssh -T -q "${host_url}" "mkdir -p ~/.ssh" + scp "${HOME}/.ssh/id_rsa" "${host_url}:~/.ssh/id_rsa" + scp "${HOME}/.ssh/id_rsa.pub" "${host_url}:~/.ssh/id_rsa.pub" + ssh -T -q "${host_url}" "chmod 700 ~/.ssh && chown -R \$(whoami):\$(whoami) ~/.ssh" +} + +cmd() { + if [[ "$1" == "cmd" ]]; then + shift 1 + fi + + cmd="cd ~/mongodb-kubernetes; $*" + ssh -T -q "${host_url}" "${cmd}" +} + +usage() { + echo "USAGE: + minikube_host.sh + +PREREQUISITES: + - s390x host with SSH access + - define MINIKUBE_HOST_NAME env var (e.g., export MINIKUBE_HOST_NAME=user@hostname) + - SSH key-based authentication configured + +COMMANDS: + configure installs on a host: calls sync, switches context, installs necessary software (auto-detects arch) + sync rsync of project directory + recreate-minikube-cluster recreates minikube cluster and executes get-kubeconfig + remote-prepare-local-e2e-run executes prepare-local-e2e on the remote host + get-kubeconfig copies remote minikube kubeconfig locally to ~/.operator-dev/minikube-host.kubeconfig + tunnel [args] creates ssh session with tunneling to all API servers + ssh [args] creates ssh session passing optional arguments to ssh + cmd [command with args] execute command as if being on s390x host + upload-my-ssh-private-key uploads your ssh keys (~/.ssh/id_rsa) to s390x host + help this message + +EXAMPLES: + 
export MINIKUBE_HOST_NAME=user@ibmz8 + minikube_host.sh tunnel + minikube_host.sh cmd 'make e2e test=replica_set' +" +} + +case ${cmd} in +configure) configure "$@" ;; +recreate-minikube-cluster) recreate-minikube-cluster "$@" ;; +get-kubeconfig) get-kubeconfig ;; +remote-prepare-local-e2e-run) remote-prepare-local-e2e-run ;; +ssh) ssh_to_host "$@" ;; +tunnel) retry_with_sleep tunnel "$@" ;; +sync) sync ;; +cmd) cmd "$@" ;; +upload-my-ssh-private-key) upload-my-ssh-private-key ;; +help) usage ;; +*) usage ;; +esac diff --git a/scripts/minikube/setup_minikube_host.sh b/scripts/minikube/setup_minikube_host.sh new file mode 100755 index 000000000..1d2618c32 --- /dev/null +++ b/scripts/minikube/setup_minikube_host.sh @@ -0,0 +1,256 @@ +#!/usr/bin/env bash + +# this script downloads necessary tooling for alternative architectures (s390x, ppc64le) using minikube (similar to setup_evg_host.sh) +source scripts/dev/set_env_context.sh +source scripts/funcs/install + +set -Eeou pipefail + +set_limits() { + echo "Increasing fs.inotify.max_user_instances" + sudo sysctl -w fs.inotify.max_user_instances=8192 + + echo "Increasing fs.inotify.max_user_watches" + sudo sysctl -w fs.inotify.max_user_watches=10485760 + + echo "Increasing the number of open files" + nofile_max=$(cat /proc/sys/fs/nr_open) + nproc_max=$(ulimit -u) + sudo tee -a /etc/security/limits.conf <>> Setting up local registry and custom kicbase image for ppc64le..." + + # Check if local registry is running (with fallback for namespace issues) + registry_running=false + if curl -s http://localhost:5000/v2/_catalog >/dev/null 2>&1; then + echo "Registry detected via HTTP check (podman ps failed)" + registry_running=true + fi + + if ! $registry_running; then + echo "Starting local container registry on port 5000..." + + # Clean up any existing registry first + sudo podman rm -f registry 2>/dev/null || true + + if ! 
sudo podman run -d -p 5000:5000 --name registry --restart=always docker.io/library/registry:2; then + echo "❌ Failed to start local registry - trying alternative approach" + exit 1 + fi + + # Wait for registry to be ready + echo "Waiting for registry to be ready..." + for i in {1..30}; do + if curl -s http://localhost:5000/v2/_catalog >/dev/null 2>&1; then + break + fi + sleep 1 + done + else + echo "✅ Local registry already running" + fi + + # Configure podman to trust local registry (both user and root level for minikube) + echo "Configuring registries.conf to trust local registry..." + + # User-level config + mkdir -p ~/.config/containers + cat > ~/.config/containers/registries.conf << 'EOF' +[[registry]] +location = "localhost:5000" +insecure = true +EOF + + # Root-level config (since minikube uses sudo podman) + sudo mkdir -p /root/.config/containers + sudo tee /root/.config/containers/registries.conf << 'EOF' >/dev/null +[[registry]] +location = "localhost:5000" +insecure = true +EOF + + echo "✅ Registry configuration created for both user and root" + custom_image_tag="localhost:5000/kicbase:v0.0.47" + + # Determine image tag + custom_image_tag="localhost:5000/kicbase:v0.0.47" + if curl -s http://localhost:5000/v2/kicbase/tags/list | grep -q "v0.0.47"; then + echo "Custom kicbase image already exists in local registry" + return 0 + fi + + # Build custom kicbase image with crictl + echo "Building custom kicbase image with crictl for ppc64le..." 
+ + # Build custom kicbase image + mkdir -p "${PROJECT_DIR:-.}/scripts/minikube/kicbase" + cat > "${PROJECT_DIR:-.}/scripts/minikube/kicbase/Dockerfile" << 'EOF' +FROM gcr.io/k8s-minikube/kicbase:v0.0.47 +RUN if [ "$(uname -m)" = "ppc64le" ]; then \ + CRICTL_VERSION="v1.28.0" && \ + curl -L "https://github.com/kubernetes-sigs/cri-tools/releases/download/${CRICTL_VERSION}/crictl-${CRICTL_VERSION}-linux-ppc64le.tar.gz" \ + -o /tmp/crictl.tar.gz && \ + tar -C /usr/bin -xzf /tmp/crictl.tar.gz && \ + chmod +x /usr/bin/crictl && \ + rm /tmp/crictl.tar.gz; \ + fi +EOF + + cd "${PROJECT_DIR:-.}/scripts/minikube/kicbase" + sudo podman build -t "${custom_image_tag}" . || { + echo "Failed to build custom image" + return 1 + } + sudo podman push "${custom_image_tag}" --tls-verify=false || { + echo "Failed to push to registry" + return 1 + } + cd - >/dev/null + echo "Custom kicbase image ready: ${custom_image_tag}" + fi + return 0 +} + +# Start minikube with podman driver +start_minikube_cluster() { + echo ">>> Starting minikube cluster with podman driver..." + + # Clean up any existing minikube state to avoid cached configuration issues + echo "Cleaning up any existing minikube state..." + if [[ -d ~/.minikube/machines/minikube ]]; then + echo "Removing ~/.minikube/machines/minikube directory..." + rm -rf ~/.minikube/machines/minikube + fi + + echo "Ensuring clean minikube state..." + "${PROJECT_DIR:-.}/bin/minikube" delete 2>/dev/null || true + + local start_args=("--driver=podman") + + if [[ "${ARCH}" == "ppc64le" ]]; then + echo "Using custom kicbase image for ppc64le with crictl..." 
+ + start_args+=("--base-image=localhost:5000/kicbase:v0.0.47") + start_args+=("--insecure-registry=localhost:5000") + fi + + # Use default bridge CNI to avoid Docker Hub rate limiting issues + # start_args+=("--cni=bridge") + + echo "Starting minikube with args: ${start_args[*]}" + if "${PROJECT_DIR:-.}/bin/minikube" start "${start_args[@]}"; then + echo "✅ Minikube started successfully" + else + echo "❌ Minikube failed to start" + echo "Minikube logs:" + "${PROJECT_DIR:-.}/bin/minikube" logs | tail -20 + return 1 + fi +} + +setup_podman() { + echo "Setting up podman for ${ARCH}..." + + # Check if podman is already available + if command -v podman &> /dev/null; then + echo "✅ Podman already installed" + + # Diagnose podman state + echo "=== Podman Diagnostics ===" + echo "User: $(whoami), UID: $(id -u)" + echo "User namespace support: $(cat /proc/self/uid_map 2>/dev/null || echo 'not available')" + echo "Systemctl user status:" + systemctl --user status podman.socket 2>/dev/null || echo "podman.socket not active" + echo "Running 'sudo podman info' command..." + sudo podman info 2>&1 + fi + + + # Configure podman to use cgroupfs instead of systemd in CI + mkdir -p ~/.config/containers + cat > ~/.config/containers/containers.conf << EOF +[containers] +cgroup_manager = "cgroupfs" +events_logger = "file" + +[engine] +cgroup_manager = "cgroupfs" +EOF + +} + +# Setup podman and container runtime +setup_podman +set_limits +download_minikube + +# Setup local registry and custom kicbase image for ppc64le if needed +setup_local_registry_and_custom_image + +echo "" +echo ">>> Verifying minikube installation..." 
+if command -v minikube &> /dev/null; then + minikube_version=$(minikube version --short 2>/dev/null || minikube version 2>/dev/null | head -n1) + echo "✅ Minikube installed successfully: ${minikube_version}" +else + echo "❌ Minikube installation failed - minikube command not found" + echo "Please check the installation logs above for errors" + exit 1 +fi + +if [[ "${ARCH}" == "ppc64le" ]]; then + echo "" + echo ">>> Note: crictl will be patched into the minikube container after startup" +else + echo "" + echo ">>> Using standard kicbase image (crictl included for x86_64/aarch64/s390x)" +fi + +# Start the minikube cluster +start_minikube_cluster + +# Update kubectl context to point to the running cluster +echo "" +echo ">>> Updating kubectl context for minikube cluster..." +"${PROJECT_DIR:-.}/bin/minikube" update-context +echo "✅ Kubectl context updated successfully" + +echo "Minikube host setup completed successfully for ${ARCH}!" + +# Final status +echo "" +echo "==========================================" +echo "✅ Setup Summary" +echo "==========================================" +echo "Architecture: ${ARCH}" +echo "Container Runtime: podman" +echo "Minikube Driver: podman" +echo "Minikube: Default cluster" +echo "Minikube: ${minikube_version}" +echo "CNI: bridge (default)" +if [[ "${ARCH}" == "ppc64le" ]]; then + echo "Special Config: Custom kicbase image with crictl via local registry" +fi diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py index 3fee2cfe6..c29bc46cd 100755 --- a/scripts/release/atomic_pipeline.py +++ b/scripts/release/atomic_pipeline.py @@ -1,8 +1,7 @@ #!/usr/bin/env python3 -"""This pipeline script knows about the details of our Docker images -and where to fetch and calculate parameters. 
It uses Sonar.py -to produce the final images.""" +"""This atomic_pipeline script knows about the details of our Docker images +and where to fetch and calculate parameters.""" import json import os import shutil @@ -17,37 +16,174 @@ from packaging.version import Version from lib.base_logger import logger -from scripts.evergreen.release.images_signing import ( +from scripts.release.build.image_build_configuration import ImageBuildConfiguration +from scripts.release.build.image_build_process import execute_docker_build +from scripts.release.build.image_signing import ( mongodb_artifactory_login, sign_image, verify_signature, ) -from .build_configuration import BuildConfiguration -from .build_context import BuildScenario -from .build_images import execute_docker_build - TRACER = trace.get_tracer("evergreen-agent") -def get_tools_distro(tools_version: str) -> Dict[str, str]: - new_rhel_tool_version = "100.10.0" - default_distro = {"arm": "rhel90-aarch64", "amd": "rhel90-x86_64"} - if Version(tools_version) >= Version(new_rhel_tool_version): - return {"arm": "rhel93-aarch64", "amd": "rhel93-x86_64"} - return default_distro +def load_agent_build_info(): + """Load agent platform mappings from build_info_agent.json""" + with open("build_info_agent.json", "r") as f: + return json.load(f) -def load_release_file() -> Dict: - with open("release.json") as release: - return json.load(release) +def extract_tools_version_from_release(release: Dict) -> str: + """ + Extract tools version from release.json mongodbToolsBundle.ubi field. 
+ + Args: + release: Release dictionary from release.json + + Returns: + Tools version string (e.g., "100.12.2") + """ + tools_bundle = release["mongodbToolsBundle"]["ubi"] + # Extract version from filename like "mongodb-database-tools-rhel88-x86_64-100.12.2.tgz" + # The version is the last part before .tgz + version_part = tools_bundle.split("-")[-1] # Gets "100.12.2.tgz" + tools_version = version_part.replace(".tgz", "") # Gets "100.12.2" + return tools_version + + +def get_build_arg_names(platform: str) -> Dict[str, str]: + """ + Generate build argument names for a platform. + + Args: + platform: Platform string (e.g., "linux/amd64") + + Returns: + Dictionary with agent_build_arg and tools_build_arg keys + """ + # Extract architecture from platform (e.g., "amd64" from "linux/amd64") + arch = platform.split("/")[1] + + return { + "agent_build_arg": f"mongodb_agent_version_{arch}", + "tools_build_arg": f"mongodb_tools_version_{arch}" + } + + +def generate_tools_build_args(platforms: List[str], tools_version: str) -> Dict[str, str]: + """ + Generate build arguments for MongoDB tools based on platform mappings. 
+ + Args: + platforms: List of platforms (e.g., ["linux/amd64", "linux/arm64"]) + tools_version: MongoDB tools version + + Returns: + Dictionary of build arguments for docker build (tools only) + """ + agent_info = load_agent_build_info() + build_args = {} + + for platform in platforms: + if platform not in agent_info["platform_mappings"]: + logger.warning(f"Platform {platform} not found in agent mappings, skipping") + continue + + mapping = agent_info["platform_mappings"][platform] + build_arg_names = get_build_arg_names(platform) + + # Generate tools build arg only + tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version) + tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}" + build_args[build_arg_names["tools_build_arg"]] = tools_filename + + return build_args + + +def generate_agent_build_args(platforms: List[str], agent_version: str, tools_version: str) -> Dict[str, str]: + """ + Generate build arguments for agent image based on platform mappings. 
+ + Args: + platforms: List of platforms (e.g., ["linux/amd64", "linux/arm64"]) + agent_version: MongoDB agent version + tools_version: MongoDB tools version + + Returns: + Dictionary of build arguments for docker build + """ + agent_info = load_agent_build_info() + build_args = {} + + for platform in platforms: + if platform not in agent_info["platform_mappings"]: + logger.warning(f"Platform {platform} not found in agent mappings, skipping") + continue + + mapping = agent_info["platform_mappings"][platform] + build_arg_names = get_build_arg_names(platform) + + # Generate agent build arg + agent_filename = f"{agent_info['base_names']['agent']}-{agent_version}.{mapping['agent_suffix']}" + build_args[build_arg_names["agent_build_arg"]] = agent_filename + + # Generate tools build arg + tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version) + tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}" + build_args[build_arg_names["tools_build_arg"]] = tools_filename + + return build_args + + +@TRACER.start_as_current_span("build_image") +def build_image( + dockerfile_path: str, + build_configuration: ImageBuildConfiguration, + build_args: Dict[str, str] = None, + build_path: str = ".", +): + """ + Build an image then (optionally) sign the result. 
+ """ + image_name = build_configuration.image_name() + span = trace.get_current_span() + span.set_attribute("mck.image_name", image_name) + + registry = build_configuration.base_registry + build_args = build_args or {} + + if build_args: + span.set_attribute("mck.build_args", str(build_args)) + logger.info(f"Building {image_name}, dockerfile args: {build_args}") + logger.debug(f"Build args: {build_args}") + logger.debug(f"Building {image_name} for platforms={build_configuration.platforms}") + logger.debug(f"build image generic - registry={registry}") + + # Build docker registry URI and call build_image + image_full_uri = f"{build_configuration.registry}:{build_configuration.version}" + + execute_docker_build( + tag=image_full_uri, + dockerfile=dockerfile_path, + path=build_path, + args=build_args, + push=True, + platforms=build_configuration.platforms, + ) + + if build_configuration.sign: + logger.info("Logging in MongoDB Artifactory for Garasign image") + mongodb_artifactory_login() + logger.info("Signing image") + sign_image(build_configuration.registry, build_configuration.version) + verify_signature(build_configuration.registry, build_configuration.version) -def build_tests_image(build_configuration: BuildConfiguration): + +def build_tests_image(build_configuration: ImageBuildConfiguration): """ Builds image used to run tests. """ - image_name = "mongodb-kubernetes-tests" # helm directory needs to be copied over to the tests docker context. 
helm_src = "helm_chart" @@ -66,46 +202,37 @@ def build_tests_image(build_configuration: BuildConfiguration): shutil.copyfile("release.json", "docker/mongodb-kubernetes-tests/release.json") shutil.copyfile("requirements.txt", requirements_dest) - python_version = os.getenv("PYTHON_VERSION", "3.11") + python_version = os.getenv("PYTHON_VERSION", "3.13") if python_version == "": raise Exception("Missing PYTHON_VERSION environment variable") - buildargs = {"PYTHON_VERSION": python_version} + build_args = dict({"PYTHON_VERSION": python_version}) build_image( - image_name=image_name, dockerfile_path="docker/mongodb-kubernetes-tests/Dockerfile", build_configuration=build_configuration, - extra_args=buildargs, + build_args=build_args, build_path="docker/mongodb-kubernetes-tests", ) -def build_mco_tests_image(build_configuration: BuildConfiguration): +def build_mco_tests_image(build_configuration: ImageBuildConfiguration): """ Builds image used to run community tests. """ - image_name = "mongodb-community-tests" - golang_version = os.getenv("GOLANG_VERSION", "1.24") - if golang_version == "": - raise Exception("Missing GOLANG_VERSION environment variable") - - buildargs = {"GOLANG_VERSION": golang_version} build_image( - image_name=image_name, dockerfile_path="docker/mongodb-community-tests/Dockerfile", build_configuration=build_configuration, - extra_args=buildargs, ) -def build_operator_image(build_configuration: BuildConfiguration): +def build_operator_image(build_configuration: ImageBuildConfiguration): """Calculates arguments required to build the operator image, and starts the build process.""" # In evergreen, we can pass test_suffix env to publish the operator to a quay # repository with a given suffix. 
- test_suffix = os.environ.get("test_suffix", "") - log_automation_config_diff = os.environ.get("LOG_AUTOMATION_CONFIG_DIFF", "false") + test_suffix = os.getenv("test_suffix", "") + log_automation_config_diff = os.getenv("LOG_AUTOMATION_CONFIG_DIFF", "false") args = { "version": build_configuration.version, @@ -115,25 +242,23 @@ def build_operator_image(build_configuration: BuildConfiguration): logger.info(f"Building Operator args: {args}") - image_name = "mongodb-kubernetes" build_image( - image_name=image_name, dockerfile_path="docker/mongodb-kubernetes-operator/Dockerfile.atomic", build_configuration=build_configuration, - extra_args=args, + build_args=args, ) -def build_database_image(build_configuration: BuildConfiguration): +def build_database_image(build_configuration: ImageBuildConfiguration): """ Builds a new database image. """ args = {"version": build_configuration.version} + build_image( - image_name="mongodb-kubernetes-database", dockerfile_path="docker/mongodb-kubernetes-database/Dockerfile.atomic", build_configuration=build_configuration, - extra_args=args, + build_args=args, ) @@ -178,23 +303,26 @@ def find_om_url(om_version: str) -> str: return current_release -def build_init_om_image(build_configuration: BuildConfiguration): +def build_init_om_image(build_configuration: ImageBuildConfiguration): args = {"version": build_configuration.version} + build_image( - image_name="mongodb-kubernetes-init-ops-manager", dockerfile_path="docker/mongodb-kubernetes-init-ops-manager/Dockerfile.atomic", build_configuration=build_configuration, - extra_args=args, + build_args=args, ) -def build_om_image(build_configuration: BuildConfiguration): +def build_om_image(build_configuration: ImageBuildConfiguration): # Make this a parameter for the Evergreen build # https://github.com/evergreen-ci/evergreen/wiki/Parameterized-Builds om_version = os.environ.get("om_version") if om_version is None: raise ValueError("`om_version` should be defined.") + # Set the version in 
the build configuration (it is not provided in the build_configuration) + build_configuration.version = om_version + om_download_url = os.environ.get("om_download_url", "") if om_download_url == "": om_download_url = find_om_url(om_version) @@ -205,166 +333,85 @@ def build_om_image(build_configuration: BuildConfiguration): } build_image( - image_name="mongodb-enterprise-ops-manager-ubi", dockerfile_path="docker/mongodb-enterprise-ops-manager/Dockerfile.atomic", build_configuration=build_configuration, - extra_args=args, + build_args=args, ) -@TRACER.start_as_current_span("build_image") -def build_image( - image_name: str, - dockerfile_path: str, - build_configuration: BuildConfiguration, - extra_args: dict | None = None, - build_path: str = ".", -): - """ - Build an image then (optionally) sign the result. - """ - span = trace.get_current_span() - span.set_attribute("mck.image_name", image_name) - - registry = build_configuration.base_registry - args_list = extra_args or {} - - # merge in the registry without mutating caller's dict - build_args = {**args_list, "quay_registry": registry} - - if build_args: - span.set_attribute("mck.build_args", str(build_args)) - - logger.info(f"Building {image_name}, dockerfile args: {build_args}") - logger.debug(f"Build args: {build_args}") - logger.debug(f"Building {image_name} for platforms={build_configuration.platforms}") - logger.debug(f"build image generic - registry={registry}") - - # Build docker registry URI and call build_image - docker_registry = f"{build_configuration.base_registry}/{image_name}" - image_full_uri = f"{docker_registry}:{build_configuration.version}" +def build_init_appdb_image(build_configuration: ImageBuildConfiguration): + release = load_release_file() + base_url = "https://fastdl.mongodb.org/tools/db/" - execute_docker_build( - tag=image_full_uri, - dockerfile=dockerfile_path, - path=build_path, - args=build_args, - push=True, + # Extract tools version and generate platform-specific build args + 
tools_version = extract_tools_version_from_release(release) + platform_build_args = generate_tools_build_args( platforms=build_configuration.platforms, + tools_version=tools_version ) - if build_configuration.sign: - logger.info("Logging in MongoDB Artifactory for Garasign image") - mongodb_artifactory_login() - logger.info("Signing image") - sign_image(docker_registry, build_configuration.version) - verify_signature(docker_registry, build_configuration.version) - + args = { + "version": build_configuration.version, + "mongodb_tools_url": base_url, # Base URL for platform-specific downloads + **platform_build_args # Add the platform-specific build args + } -def build_init_appdb(build_configuration: BuildConfiguration): - release = load_release_file() - base_url = "https://fastdl.mongodb.org/tools/db/" - mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) - args = {"version": build_configuration.version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} build_image( - image_name="mongodb-kubernetes-init-appdb", dockerfile_path="docker/mongodb-kubernetes-init-appdb/Dockerfile.atomic", build_configuration=build_configuration, - extra_args=args, + build_args=args, ) # TODO: nam static: remove this once static containers becomes the default -def build_init_database(build_configuration: BuildConfiguration): +def build_init_database_image(build_configuration: ImageBuildConfiguration): release = load_release_file() base_url = "https://fastdl.mongodb.org/tools/db/" mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) - args = {"version": build_configuration.version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} - build_image( - "mongodb-kubernetes-init-database", - "docker/mongodb-kubernetes-init-database/Dockerfile.atomic", - build_configuration=build_configuration, - extra_args=args, - ) - -def build_community_image(build_configuration: BuildConfiguration, image_type: str): - """ - Builds image for 
community components (readiness probe, upgrade hook). - - Args: - build_configuration: The build configuration to use - image_type: Type of image to build ("readiness-probe" or "upgrade-hook") - """ - - if image_type == "readiness-probe": - image_name = "mongodb-kubernetes-readinessprobe" - dockerfile_path = "docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic" - elif image_type == "upgrade-hook": - image_name = "mongodb-kubernetes-operator-version-upgrade-post-start-hook" - dockerfile_path = "docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic" - else: - raise ValueError(f"Unsupported community image type: {image_type}") - - version = build_configuration.version - golang_version = os.getenv("GOLANG_VERSION", "1.24") + # Extract tools version and generate platform-specific build args + tools_version = extract_tools_version_from_release(release) + platform_build_args = generate_tools_build_args( + platforms=build_configuration.platforms, + tools_version=tools_version + ) - extra_args = { - "version": version, - "GOLANG_VERSION": golang_version, + args = { + "version": build_configuration.version, + "mongodb_tools_url": base_url, # Add the base URL for the Dockerfile + **platform_build_args # Add the platform-specific build args } build_image( - image_name=image_name, - dockerfile_path=dockerfile_path, + "docker/mongodb-kubernetes-init-database/Dockerfile.atomic", build_configuration=build_configuration, - extra_args=extra_args, + build_args=args, ) -def build_readiness_probe_image(build_configuration: BuildConfiguration): +def build_readiness_probe_image(build_configuration: ImageBuildConfiguration): """ Builds image used for readiness probe. 
""" - build_community_image(build_configuration, "readiness-probe") + + build_image( + dockerfile_path="docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic", + build_configuration=build_configuration, + ) -def build_upgrade_hook_image(build_configuration: BuildConfiguration): +def build_upgrade_hook_image(build_configuration: ImageBuildConfiguration): """ Builds image used for version upgrade post-start hook. """ - build_community_image(build_configuration, "upgrade-hook") - - -def build_agent_pipeline( - build_configuration: BuildConfiguration, - operator_version: str, - agent_version: str, - agent_distro: str, - tools_version: str, - tools_distro: str, -): - image_version = f"{agent_version}_{operator_version}" - - build_configuration_copy = copy(build_configuration) - build_configuration_copy.version = image_version - args = { - "version": image_version, - "agent_version": agent_version, - "agent_distro": agent_distro, - "tools_version": tools_version, - "tools_distro": tools_distro, - } build_image( - image_name="mongodb-agent-ubi", - dockerfile_path="docker/mongodb-agent/Dockerfile.atomic", - build_configuration=build_configuration_copy, - extra_args=args, + dockerfile_path="docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic", + build_configuration=build_configuration, ) -def build_agent_default_case(build_configuration: BuildConfiguration): +def build_agent_default_case(build_configuration: ImageBuildConfiguration): """ Build the agent only for the latest operator for patches and operator releases. 
@@ -372,7 +419,7 @@ def build_agent_default_case(build_configuration: BuildConfiguration): release = load_release_file() # We need to release [all agents x latest operator] on operator releases - if build_configuration.scenario == BuildScenario.RELEASE: + if build_configuration.is_release_scenario(): agent_versions_to_build = gather_all_supported_agent_versions(release) # We only need [latest agents (for each OM major version and for CM) x patch ID] for patches else: @@ -392,10 +439,8 @@ def build_agent_default_case(build_configuration: BuildConfiguration): logger.info(f"Running with factor of {max_workers}") logger.info(f"======= Agent versions to build {agent_versions_to_build} =======") for idx, agent_tools_version in enumerate(agent_versions_to_build): - # We don't need to keep create and push the same image on every build. - # It is enough to create and push the non-operator suffixed images only during releases to ecr and quay. logger.info(f"======= Building Agent {agent_tools_version} ({idx}/{len(agent_versions_to_build)})") - _build_agent_operator( + _build_agent( agent_tools_version, build_configuration, executor, @@ -405,42 +450,6 @@ def build_agent_default_case(build_configuration: BuildConfiguration): queue_exception_handling(tasks_queue) -def queue_exception_handling(tasks_queue): - exceptions_found = False - for task in tasks_queue.queue: - if task.exception() is not None: - exceptions_found = True - logger.fatal(f"The following exception has been found when building: {task.exception()}") - if exceptions_found: - raise Exception( - f"Exception(s) found when processing Agent images. 
\nSee also previous logs for more info\nFailing the build" - ) - - -def _build_agent_operator( - agent_tools_version: Tuple[str, str], - build_configuration: BuildConfiguration, - executor: ProcessPoolExecutor, - tasks_queue: Queue, -): - agent_version = agent_tools_version[0] - agent_distro = "rhel9_x86_64" - tools_version = agent_tools_version[1] - tools_distro = get_tools_distro(tools_version)["amd"] - - tasks_queue.put( - executor.submit( - build_agent_pipeline, - build_configuration, - build_configuration.version, - agent_version, - agent_distro, - tools_version, - tools_distro, - ) - ) - - def gather_all_supported_agent_versions(release: Dict) -> List[Tuple[str, str]]: # This is a list of a tuples - agent version and corresponding tools version agent_versions_to_build = list() @@ -500,3 +509,80 @@ def gather_latest_agent_versions(release: Dict) -> List[Tuple[str, str]]: agent_versions_to_build.append(("107.0.12.8669-1", "100.10.0")) return sorted(list(set(agent_versions_to_build))) + + +def _build_agent( + agent_tools_version: Tuple[str, str], + build_configuration: ImageBuildConfiguration, + executor: ProcessPoolExecutor, + tasks_queue: Queue, +): + agent_version = agent_tools_version[0] + tools_version = agent_tools_version[1] + + tasks_queue.put( + executor.submit( + build_agent_pipeline, + build_configuration, + agent_version, + tools_version + ) + ) + + +def build_agent_pipeline( + build_configuration: ImageBuildConfiguration, + agent_version: str, + tools_version: str, +): + build_configuration_copy = copy(build_configuration) + build_configuration_copy.version = agent_version + print( + f"======== Building agent pipeline for version {agent_version}, build configuration version: {build_configuration.version}" + ) + + # Generate platform-specific build arguments using the mapping + platform_build_args = generate_agent_build_args( + platforms=build_configuration.platforms, + agent_version=agent_version, + tools_version=tools_version + ) + + args = { + 
"version": agent_version, + "agent_version": agent_version, + "mongodb_agent_url": "https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod", # TODO: migrate to build info, + "mongodb_tools_url": "https://fastdl.mongodb.org/tools/db", # TODO: migrate to build info, + **platform_build_args # Add the platform-specific build args + } + + build_image( + dockerfile_path="docker/mongodb-agent/Dockerfile.atomic", + build_configuration=build_configuration_copy, + build_args=args, + ) + + +def queue_exception_handling(tasks_queue): + exceptions_found = False + for task in tasks_queue.queue: + if task.exception() is not None: + exceptions_found = True + logger.fatal(f"The following exception has been found when building: {task.exception()}") + if exceptions_found: + raise Exception( + f"Exception(s) found when processing Agent images. \nSee also previous logs for more info\nFailing the build" + ) + + +def get_tools_distro(tools_version: str) -> Dict[str, str]: + new_rhel_tool_version = "100.10.0" + default_distro = {"arm": "rhel90-aarch64", "amd": "rhel90-x86_64"} + if Version(tools_version) >= Version(new_rhel_tool_version): + return {"arm": "rhel93-aarch64", "amd": "rhel93-x86_64"} + return default_distro + + +def load_release_file() -> Dict: + with open("release.json") as release: + return json.load(release) diff --git a/scripts/release/atomic_pipeline_test.py b/scripts/release/atomic_pipeline_test.py new file mode 100644 index 000000000..d495ec283 --- /dev/null +++ b/scripts/release/atomic_pipeline_test.py @@ -0,0 +1,310 @@ +#!/usr/bin/env python3 +""" +Test for agent build mapping functionality in atomic_pipeline.py +""" + +import json +import unittest +from unittest.mock import patch + +# Local implementations to avoid import issues + + +def load_agent_build_info(): + """Load agent platform mappings from build_info_agent.json""" + with open("build_info_agent.json", "r") as f: + return json.load(f) + + +def 
get_build_arg_names(platform): + """Generate build argument names for a platform.""" + arch = platform.split("/")[1] + return { + "agent_build_arg": f"mongodb_agent_version_{arch}", + "tools_build_arg": f"mongodb_tools_version_{arch}" + } + + +def extract_tools_version_from_release(release): + """Extract tools version from release.json mongodbToolsBundle.ubi field.""" + tools_bundle = release["mongodbToolsBundle"]["ubi"] + version_part = tools_bundle.split("-")[-1] # Gets "100.12.2.tgz" + tools_version = version_part.replace(".tgz", "") # Gets "100.12.2" + return tools_version + + +def generate_tools_build_args(platforms, tools_version): + """Generate build arguments for MongoDB tools based on platform mappings.""" + agent_info = load_agent_build_info() + build_args = {} + + for platform in platforms: + if platform not in agent_info["platform_mappings"]: + print(f"Platform {platform} not found in agent mappings, skipping") + continue + + mapping = agent_info["platform_mappings"][platform] + build_arg_names = get_build_arg_names(platform) + + # Generate tools build arg only + tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version) + tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}" + build_args[build_arg_names["tools_build_arg"]] = tools_filename + + return build_args + + +def generate_agent_build_args(platforms, agent_version, tools_version): + """ + Generate build arguments for agent image based on platform mappings. 
+ This is the actual implementation from atomic_pipeline.py + """ + agent_info = load_agent_build_info() + build_args = {} + + for platform in platforms: + if platform not in agent_info["platform_mappings"]: + # Mock the logger warning for testing + print(f"Platform {platform} not found in agent mappings, skipping") + continue + + mapping = agent_info["platform_mappings"][platform] + build_arg_names = get_build_arg_names(platform) + + # Generate agent build arg + agent_filename = f"{agent_info['base_names']['agent']}-{agent_version}.{mapping['agent_suffix']}" + build_args[build_arg_names["agent_build_arg"]] = agent_filename + + # Generate tools build arg + tools_suffix = mapping["tools_suffix"].replace("{TOOLS_VERSION}", tools_version) + tools_filename = f"{agent_info['base_names']['tools']}-{tools_suffix}" + build_args[build_arg_names["tools_build_arg"]] = tools_filename + + return build_args + + +def _parse_dockerfile_build_args(dockerfile_path): + """Parse Dockerfile to extract expected build arguments using proper parsing.""" + build_args = set() + + with open(dockerfile_path, 'r') as f: + lines = f.readlines() + + for line in lines: + line = line.strip() + # Skip comments and empty lines + if not line or line.startswith('#'): + continue + + # Parse ARG instructions + if line.startswith('ARG '): + arg_part = line[4:].strip() # Remove 'ARG ' + + # Handle ARG with default values (ARG name=default) + arg_name = arg_part.split('=')[0].strip() + + build_args.add(arg_name) + + return build_args + + +class TestAgentBuildMapping(unittest.TestCase): + """Test cases for agent build mapping functionality.""" + + def setUp(self): + """Set up test fixtures.""" + # Load the actual build_info_agent.json file + with open("build_info_agent.json", "r") as f: + self.agent_build_info = json.load(f) + + def test_generate_agent_build_args_single_platform(self): + """Test generating build args for a single platform.""" + platforms = ["linux/amd64"] + agent_version = "108.0.7.8810-1" 
+ tools_version = "100.12.0" + + result = generate_agent_build_args(platforms, agent_version, tools_version) + + expected = { + "mongodb_agent_version_amd64": "mongodb-mms-automation-agent-108.0.7.8810-1.linux_x86_64.tar.gz", + "mongodb_tools_version_amd64": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz" + } + + self.assertEqual(result, expected) + + def test_generate_agent_build_args_multiple_platforms(self): + """Test generating build args for multiple platforms.""" + platforms = ["linux/amd64", "linux/arm64", "linux/s390x", "linux/ppc64le"] + agent_version = "108.0.7.8810-1" + tools_version = "100.12.0" + + result = generate_agent_build_args(platforms, agent_version, tools_version) + + expected = { + "mongodb_agent_version_amd64": "mongodb-mms-automation-agent-108.0.7.8810-1.linux_x86_64.tar.gz", + "mongodb_tools_version_amd64": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz", + "mongodb_agent_version_arm64": "mongodb-mms-automation-agent-108.0.7.8810-1.amzn2_aarch64.tar.gz", + "mongodb_tools_version_arm64": "mongodb-database-tools-rhel88-aarch64-100.12.0.tgz", + "mongodb_agent_version_s390x": "mongodb-mms-automation-agent-108.0.7.8810-1.rhel7_s390x.tar.gz", + "mongodb_tools_version_s390x": "mongodb-database-tools-rhel9-s390x-100.12.0.tgz", + "mongodb_agent_version_ppc64le": "mongodb-mms-automation-agent-108.0.7.8810-1.rhel8_ppc64le.tar.gz", + "mongodb_tools_version_ppc64le": "mongodb-database-tools-rhel9-ppc64le-100.12.0.tgz" + } + + self.assertEqual(result, expected) + + @patch('builtins.print') + def test_generate_agent_build_args_unknown_platform(self, mock_print): + """Test handling of unknown platforms.""" + platforms = ["linux/amd64", "linux/unknown"] + agent_version = "108.0.7.8810-1" + tools_version = "100.12.0" + + result = generate_agent_build_args(platforms, agent_version, tools_version) + + # Should only include known platform + expected = { + "mongodb_agent_version_amd64": "mongodb-mms-automation-agent-108.0.7.8810-1.linux_x86_64.tar.gz", 
+ "mongodb_tools_version_amd64": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz" + } + + self.assertEqual(result, expected) + mock_print.assert_called_once_with("Platform linux/unknown not found in agent mappings, skipping") + + def test_generate_agent_build_args_empty_platforms(self): + """Test generating build args with empty platforms list.""" + platforms = [] + agent_version = "108.0.7.8810-1" + tools_version = "100.12.0" + + result = generate_agent_build_args(platforms, agent_version, tools_version) + + self.assertEqual(result, {}) + + def test_build_args_match_dockerfile_requirements(self): + """Test that generated build args exactly match what the Dockerfile expects.""" + # Define the expected build args based on the platforms we support + # This is cleaner than parsing the Dockerfile and more explicit about our expectations + expected_dockerfile_args = { + "mongodb_agent_version_amd64", "mongodb_agent_version_arm64", + "mongodb_agent_version_s390x", "mongodb_agent_version_ppc64le", + "mongodb_tools_version_amd64", "mongodb_tools_version_arm64", + "mongodb_tools_version_s390x", "mongodb_tools_version_ppc64le" + } + + # Generate build args for all platforms + platforms = ["linux/amd64", "linux/arm64", "linux/s390x", "linux/ppc64le"] + agent_version = "108.0.7.8810-1" + tools_version = "100.12.0" + + result = generate_agent_build_args(platforms, agent_version, tools_version) + generated_build_args = set(result.keys()) + + # Verify that we generate exactly the build args the Dockerfile expects + self.assertEqual(generated_build_args, expected_dockerfile_args, + f"Generated build args {generated_build_args} don't match expected {expected_dockerfile_args}") + + # Verify the format of generated filenames matches what Dockerfile expects + for arg_name, filename in result.items(): + if "agent" in arg_name: + self.assertTrue(filename.startswith("mongodb-mms-automation-agent-")) + self.assertTrue(filename.endswith(".tar.gz")) + elif "tools" in arg_name: + 
self.assertTrue(filename.startswith("mongodb-database-tools-")) + self.assertTrue(filename.endswith(".tgz")) + + def test_dockerfile_contains_expected_args(self): + """Test that the Dockerfile actually contains the build args we expect.""" + dockerfile_path = "docker/mongodb-agent/Dockerfile.atomic" + + # Read the Dockerfile content + with open(dockerfile_path, 'r') as f: + dockerfile_content = f.read() + + # Define the expected build args + expected_args = [ + "mongodb_agent_version_amd64", "mongodb_agent_version_arm64", + "mongodb_agent_version_s390x", "mongodb_agent_version_ppc64le", + "mongodb_tools_version_amd64", "mongodb_tools_version_arm64", + "mongodb_tools_version_s390x", "mongodb_tools_version_ppc64le" + ] + + # Verify each expected arg is declared in the Dockerfile + for arg_name in expected_args: + self.assertIn(f"ARG {arg_name}", dockerfile_content, + f"Dockerfile should contain 'ARG {arg_name}' declaration") + + def test_generate_tools_build_args(self): + """Test generating tools-only build args.""" + platforms = ["linux/amd64", "linux/arm64"] + tools_version = "100.12.0" + + result = generate_tools_build_args(platforms, tools_version) + + expected = { + "mongodb_tools_version_amd64": "mongodb-database-tools-rhel88-x86_64-100.12.0.tgz", + "mongodb_tools_version_arm64": "mongodb-database-tools-rhel88-aarch64-100.12.0.tgz" + } + + self.assertEqual(result, expected) + + def test_extract_tools_version_from_release(self): + """Test extracting tools version from release.json structure.""" + release = { + "mongodbToolsBundle": { + "ubi": "mongodb-database-tools-rhel88-x86_64-100.12.2.tgz" + } + } + + result = extract_tools_version_from_release(release) + self.assertEqual(result, "100.12.2") + + def test_tools_build_args_match_init_dockerfiles(self): + """Test that tools build args match what init-database and init-appdb Dockerfiles expect.""" + platforms = ["linux/amd64", "linux/arm64", "linux/s390x", "linux/ppc64le"] + tools_version = "100.12.0" + + result 
= generate_tools_build_args(platforms, tools_version) + + # Verify all expected tools build args are present (no agent args) + expected_tools_args = { + "mongodb_tools_version_amd64", "mongodb_tools_version_arm64", + "mongodb_tools_version_s390x", "mongodb_tools_version_ppc64le" + } + + generated_args = set(result.keys()) + self.assertEqual(generated_args, expected_tools_args) + + # Verify no agent args are included + for arg_name in result.keys(): + self.assertIn("tools", arg_name) + self.assertNotIn("agent", arg_name) + + def test_url_construction_correctness(self): + """Test that URLs are constructed correctly with proper trailing slashes.""" + # Test agent build args URL construction + platforms = ["linux/amd64"] + agent_version = "108.0.12.8846-1" + tools_version = "100.12.2" + + result = generate_agent_build_args(platforms, agent_version, tools_version) + + agent_base_url = "https://fastdl.mongodb.org/tools/mms-automation/" + tools_base_url = "https://fastdl.mongodb.org/tools/db/" + + agent_filename = result["mongodb_agent_version_amd64"] + tools_filename = result["mongodb_tools_version_amd64"] + + agent_url = f"{agent_base_url}{agent_filename}" + tools_url = f"{tools_base_url}{tools_filename}" + + expected_agent_url = "https://fastdl.mongodb.org/tools/mms-automation/mongodb-mms-automation-agent-108.0.12.8846-1.linux_x86_64.tar.gz" + expected_tools_url = "https://fastdl.mongodb.org/tools/db/mongodb-database-tools-rhel88-x86_64-100.12.2.tgz" + + self.assertEqual(agent_url, expected_agent_url) + self.assertEqual(tools_url, expected_tools_url) + + + +if __name__ == "__main__": + unittest.main() diff --git a/scripts/release/build/build_info.py b/scripts/release/build/build_info.py index 37222223c..a12a97ea8 100644 --- a/scripts/release/build/build_info.py +++ b/scripts/release/build/build_info.py @@ -1,65 +1,52 @@ import json +from dataclasses import dataclass from typing import Dict from scripts.release.build.build_scenario import BuildScenario from 
scripts.release.constants import DEFAULT_REPOSITORY_PATH, DEFAULT_CHANGELOG_PATH, RELEASE_INITIAL_VERSION_ENV_VAR, \ get_initial_version, get_initial_commit_sha +MEKO_TESTS_IMAGE = "meko-tests" +OPERATOR_IMAGE = "operator" +MCO_TESTS_IMAGE = "mco-tests" +READINESS_PROBE_IMAGE = "readiness-probe" +UPGRADE_HOOK_IMAGE = "upgrade-hook" +DATABASE_IMAGE = "database" +AGENT_IMAGE = "agent" +INIT_APPDB_IMAGE = "init-appdb" +INIT_DATABASE_IMAGE = "init-database" +INIT_OPS_MANAGER_IMAGE = "init-ops-manager" +OPS_MANAGER_IMAGE = "ops-manager" -class ImageInfo(dict): - def __init__(self, repository: str, platforms: list[str], version: str): - super().__init__() - self.repository = repository - self.platforms = platforms - self.version = version - def to_json(self): - return {"repository": self.repository, "platforms": self.platforms, "version": self.version} +@dataclass +class ImageInfo: + repository: str + platforms: list[str] + version: str + sign: bool -class BinaryInfo(dict): - def __init__(self, s3_store: str, platforms: list[str], version: str): - super().__init__() - self.s3_store = s3_store - self.platforms = platforms - self.version = version +@dataclass +class BinaryInfo: + s3_store: str + platforms: list[str] + version: str + sign: bool - def to_json(self): - return {"platforms": self.platforms, "version": self.version} +@dataclass +class HelmChartInfo: + repository: str + version: str + sign: bool -class HelmChartInfo(dict): - def __init__(self, repository: str, version: str): - super().__init__() - self.repository = repository - self.version = version - def to_json(self): - return {"repository": self.repository, "version": self.version} - - -class BuildInfo(dict): - def __init__( - self, images: Dict[str, ImageInfo], binaries: Dict[str, BinaryInfo], helm_charts: Dict[str, HelmChartInfo] - ): - super().__init__() - self.images = images - self.binaries = binaries - self.helm_charts = helm_charts - - def __dict__(self): - return { - "images": {name: images.__dict__ 
for name, images in self.images.items()}, - "binaries": {name: bin.__dict__ for name, bin in self.binaries.items()}, - "helm-charts": {name: chart.__dict__ for name, chart in self.helm_charts.items()}, - } - - def to_json(self): - return { - "images": {name: images.to_json() for name, images in self.images.items()}, - "binaries": {name: bin.to_json() for name, bin in self.binaries.items()}, - "helm-charts": {name: chart.to_json() for name, chart in self.helm_charts.items()}, - } +@dataclass +class BuildInfo: + images: Dict[str, ImageInfo] + binaries: Dict[str, BinaryInfo] + helm_charts: Dict[str, HelmChartInfo] def load_build_info(scenario: BuildScenario, @@ -90,22 +77,48 @@ def load_build_info(scenario: BuildScenario, images = {} for name, env_data in build_info["images"].items(): - data = env_data[scenario] + data = env_data.get(scenario) + if not data: + # If no data is available for the scenario, skip this image + continue + # Only update the image_version if it is not already set in the build_info.json file image_version = data.get("version") if not image_version: image_version = version - images[name] = ImageInfo(repository=data["repository"], platforms=data["platforms"], version=image_version) + images[name] = ImageInfo( + repository=data["repository"], + platforms=data["platforms"], + version=image_version, + sign=data.get("sign", False), + ) binaries = {} for name, env_data in build_info["binaries"].items(): - data = env_data[scenario] - binaries[name] = BinaryInfo(s3_store=data["s3-store"], platforms=data["platforms"], version=version) + data = env_data.get(scenario) + if not data: + # If no data is available for the scenario, skip this binary + continue + + binaries[name] = BinaryInfo( + s3_store=data["s3-store"], + platforms=data["platforms"], + version=version, + sign=data.get("sign", False), + ) helm_charts = {} for name, env_data in build_info["helm-charts"].items(): - data = env_data[scenario] - helm_charts[name] = 
HelmChartInfo(repository=data["repository"], version=version) + data = env_data.get(scenario) + if not data: + # If no data is available for the scenario, skip this helm-chart + continue + + helm_charts[name] = HelmChartInfo( + repository=data["repository"], + version=version, + sign=data.get("sign", False), + ) return BuildInfo(images=images, binaries=binaries, helm_charts=helm_charts) diff --git a/scripts/release/build/build_info_test.py b/scripts/release/build/build_info_test.py index 9d33a909e..20f563981 100644 --- a/scripts/release/build/build_info_test.py +++ b/scripts/release/build/build_info_test.py @@ -1,5 +1,7 @@ import os +from git import Repo + from scripts.release.build.build_info import ( BinaryInfo, BuildInfo, @@ -7,70 +9,102 @@ ImageInfo, load_build_info, ) -from git import Repo from scripts.release.build.build_scenario import BuildScenario def test_load_build_info_patch(git_repo: Repo): - build_id = "688364423f9b6c00072b3556" - os.environ["BUILD_ID"] = build_id + patch_id = "688364423f9b6c00072b3556" + os.environ["version_id"] = patch_id expected_build_info = BuildInfo( images={ - "mongodbOperator": ImageInfo( + "operator": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes", platforms=["linux/amd64"], - version=build_id, + version=patch_id, + sign=False, ), - "initDatabase": ImageInfo( + "init-database": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-database", platforms=["linux/amd64"], - version=build_id, + version=patch_id, + sign=False, ), - "initAppDb": ImageInfo( + "init-appdb": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-appdb", platforms=["linux/amd64"], - version=build_id, + version=patch_id, + sign=False, ), - "initOpsManager": ImageInfo( + "init-ops-manager": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-ops-manager", platforms=["linux/amd64"], - 
version=build_id, + version=patch_id, + sign=False, ), "database": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-database", platforms=["linux/amd64"], - version=build_id, + version=patch_id, + sign=False, + ), + "mco-tests": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-community-tests", + platforms=["linux/amd64"], + version=patch_id, + sign=False, + ), + "meko-tests": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-tests", + platforms=["linux/amd64"], + version=patch_id, + sign=False, ), - "readinessprobe": ImageInfo( + "readiness-probe": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-readinessprobe", platforms=["linux/amd64"], - version=build_id, + version=patch_id, + sign=False, ), - "operator-version-upgrade-post-start-hook": ImageInfo( + "upgrade-hook": ImageInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-operator-version-upgrade-post-start-hook", platforms=["linux/amd64"], - version=build_id, + version=patch_id, + sign=False, + ), + "agent": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent-ubi", + platforms=["linux/amd64"], + version=patch_id, + sign=False, + ), + "ops-manager": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-enterprise-ops-manager", + platforms=["linux/amd64"], + version="om-version-from-release.json", + sign=False, ), }, binaries={ "kubectl-mongodb": BinaryInfo( s3_store="s3://kubectl-mongodb/dev", platforms=["linux/amd64"], - version=build_id, + version=patch_id, + sign=False, ) }, helm_charts={ "mongodb-kubernetes": HelmChartInfo( repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/helm-charts", - version=build_id, + version=patch_id, + sign=False, ) }, ) build_info = load_build_info(BuildScenario.PATCH, git_repo.working_dir) - assert 
build_info.__dict__() == expected_build_info.__dict__() + assert build_info == expected_build_info def test_load_build_info_staging(git_repo: Repo): @@ -80,40 +114,71 @@ def test_load_build_info_staging(git_repo: Repo): expected_build_info = BuildInfo( images={ - "mongodbOperator": ImageInfo( - repository="quay.io/mongodb/mongodb-kubernetes-stg", + "operator": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes", platforms=["linux/arm64", "linux/amd64"], version=expecter_commit_sha, + sign=True, ), - "initDatabase": ImageInfo( - repository="quay.io/mongodb/mongodb-kubernetes-init-database-stg", + "init-database": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-database", platforms=["linux/arm64", "linux/amd64"], version=expecter_commit_sha, + sign=True, ), - "initAppDb": ImageInfo( - repository="quay.io/mongodb/mongodb-kubernetes-init-appdb-stg", + "init-appdb": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-appdb", platforms=["linux/arm64", "linux/amd64"], version=expecter_commit_sha, + sign=True, ), - "initOpsManager": ImageInfo( - repository="quay.io/mongodb/mongodb-kubernetes-init-ops-manager-stg", + "init-ops-manager": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-ops-manager", platforms=["linux/arm64", "linux/amd64"], version=expecter_commit_sha, + sign=True, ), "database": ImageInfo( - repository="quay.io/mongodb/mongodb-kubernetes-database-stg", + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-database", platforms=["linux/arm64", "linux/amd64"], version=expecter_commit_sha, + sign=True, + ), + "mco-tests": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-community-tests", + platforms=["linux/amd64"], + version=expecter_commit_sha, + sign=False, ), - "readinessprobe": ImageInfo( 
- repository="quay.io/mongodb/mongodb-kubernetes-readinessprobe-stg", + "meko-tests": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-tests", + platforms=["linux/amd64"], + version=expecter_commit_sha, + sign=False, + ), + "readiness-probe": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-readinessprobe", platforms=["linux/arm64", "linux/amd64"], version=expecter_commit_sha, + sign=True, ), - "operator-version-upgrade-post-start-hook": ImageInfo( - repository="quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook-stg", + "upgrade-hook": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-operator-version-upgrade-post-start-hook", platforms=["linux/arm64", "linux/amd64"], version=expecter_commit_sha, + sign=True, + ), + "agent": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-agent-ubi", + platforms=["linux/arm64", "linux/amd64"], + version=expecter_commit_sha, + sign=True, + ), + "ops-manager": ImageInfo( + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-enterprise-ops-manager", + platforms=["linux/amd64"], + version="om-version-from-release.json", + sign=True, ), }, binaries={ @@ -121,19 +186,21 @@ def test_load_build_info_staging(git_repo: Repo): s3_store="s3://kubectl-mongodb/staging", platforms=["darwin/amd64", "darwin/arm64", "linux/amd64", "linux/arm64"], version=expecter_commit_sha, + sign=True, ) }, helm_charts={ "mongodb-kubernetes": HelmChartInfo( - repository="quay.io/mongodb/helm-charts-stg", + repository="268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/helm-charts", version=expecter_commit_sha, + sign=True, ) }, ) build_info = load_build_info(BuildScenario.STAGING, git_repo.working_dir) - assert build_info.__dict__() == expected_build_info.__dict__() + assert build_info == expected_build_info def 
test_load_build_info_release(git_repo: Repo, readinessprobe_version: str, @@ -143,40 +210,59 @@ def test_load_build_info_release(git_repo: Repo, readinessprobe_version: str, expected_build_info = BuildInfo( images={ - "mongodbOperator": ImageInfo( + "operator": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes", platforms=["linux/arm64", "linux/amd64"], version=version, + sign=True, ), - "initDatabase": ImageInfo( + "init-database": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes-init-database", platforms=["linux/arm64", "linux/amd64"], version=version, + sign=True, ), - "initAppDb": ImageInfo( + "init-appdb": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes-init-appdb", platforms=["linux/arm64", "linux/amd64"], version=version, + sign=True, ), - "initOpsManager": ImageInfo( + "init-ops-manager": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes-init-ops-manager", platforms=["linux/arm64", "linux/amd64"], version=version, + sign=True, ), "database": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes-database", platforms=["linux/arm64", "linux/amd64"], version=version, + sign=True, ), - "readinessprobe": ImageInfo( + "readiness-probe": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes-readinessprobe", platforms=["linux/arm64", "linux/amd64"], version=readinessprobe_version, + sign=True, ), - "operator-version-upgrade-post-start-hook": ImageInfo( + "upgrade-hook": ImageInfo( repository="quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook", platforms=["linux/arm64", "linux/amd64"], version=operator_version_upgrade_post_start_hook_version, + sign=True, + ), + "agent": ImageInfo( + repository="quay.io/mongodb/mongodb-agent-ubi", + platforms=["linux/arm64", "linux/amd64"], + version=version, + sign=True, + ), + "ops-manager": ImageInfo( + repository="quay.io/mongodb/mongodb-enterprise-ops-manager", + platforms=["linux/amd64"], + version="om-version-from-release.json", + sign=True, ), }, binaries={ 
@@ -184,16 +270,18 @@ def test_load_build_info_release(git_repo: Repo, readinessprobe_version: str, s3_store="s3://kubectl-mongodb/prod", platforms=["darwin/amd64", "darwin/arm64", "linux/amd64", "linux/arm64"], version=version, + sign=True, ) }, helm_charts={ "mongodb-kubernetes": HelmChartInfo( repository="quay.io/mongodb/helm-charts", version=version, + sign=True, ) }, ) build_info = load_build_info(BuildScenario.RELEASE, git_repo.working_dir) - assert build_info.__dict__() == expected_build_info.__dict__() + assert build_info == expected_build_info diff --git a/scripts/release/build/build_scenario.py b/scripts/release/build/build_scenario.py index 9dc28b8af..4650b581d 100644 --- a/scripts/release/build/build_scenario.py +++ b/scripts/release/build/build_scenario.py @@ -1,8 +1,9 @@ -import os from enum import StrEnum from git import Repo +from lib.base_logger import logger +from scripts.release.constants import triggered_by_git_tag, is_evg_patch, is_running_in_evg, get_version_id from scripts.release.version import calculate_next_version COMMIT_SHA_LENGTH = 8 @@ -12,6 +13,32 @@ class BuildScenario(StrEnum): RELEASE = "release" # Official release triggered by a git tag PATCH = "patch" # CI build for a patch/pull request STAGING = "staging" # CI build from a merge to the master + DEVELOPMENT = "development" # Local build on a developer machine + + @classmethod + def infer_scenario_from_environment(cls) -> "BuildScenario": + """Infer the build scenario from environment variables.""" + git_tag = triggered_by_git_tag() + is_patch = is_evg_patch() + is_evg = is_running_in_evg() + patch_id = get_version_id() + + if git_tag: + # Release scenario and the git tag will be used for promotion process only + scenario = BuildScenario.RELEASE + logger.info(f"Build scenario: {scenario} (git_tag: {git_tag})") + elif is_patch or is_evg: + scenario = BuildScenario.PATCH + logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})") + # TODO: Uncomment the following lines when 
starting to work on staging builds + # elif is_evg: + # scenario = BuildScenario.STAGING + # logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})") + else: + scenario = BuildScenario.DEVELOPMENT + logger.info(f"Build scenario: {scenario}") + + return scenario def get_version(self, repository_path: str, changelog_sub_path: str, initial_commit_sha: str = None, initial_version: str = None) -> str: @@ -19,13 +46,15 @@ def get_version(self, repository_path: str, changelog_sub_path: str, initial_com match self: case BuildScenario.PATCH: - build_id = os.environ["BUILD_ID"] - if not build_id: - raise ValueError(f"BUILD_ID environment variable is not set for `{self}` build scenario") - return build_id + patch_id = get_version_id() + if not patch_id: + raise ValueError(f"version_id environment variable is not set for `{self}` build scenario") + return patch_id case BuildScenario.STAGING: return repo.head.object.hexsha[:COMMIT_SHA_LENGTH] case BuildScenario.RELEASE: return calculate_next_version(repo, changelog_sub_path, initial_commit_sha, initial_version) + case BuildScenario.DEVELOPMENT: + return "test" raise ValueError(f"Unknown build scenario: {self}") diff --git a/scripts/release/build/conftest.py b/scripts/release/build/conftest.py index ae820b2da..bdde0952c 100644 --- a/scripts/release/build/conftest.py +++ b/scripts/release/build/conftest.py @@ -9,18 +9,16 @@ def get_manually_upgradable_versions() -> Dict[str, str]: build_info = json.load(f) return { - "readinessprobe": build_info["images"]["readinessprobe"]["release"]["version"], - "operator_version_upgrade_post_start_hook": build_info["images"]["operator-version-upgrade-post-start-hook"][ - "release" - ]["version"], + "readiness-probe": build_info["images"]["readiness-probe"]["release"]["version"], + "upgrade-hook": build_info["images"]["upgrade-hook"]["release"]["version"], } @fixture(scope="module") def readinessprobe_version() -> str: - return get_manually_upgradable_versions()["readinessprobe"] + 
return get_manually_upgradable_versions()["readiness-probe"] @fixture(scope="module") def operator_version_upgrade_post_start_hook_version() -> str: - return get_manually_upgradable_versions()["operator_version_upgrade_post_start_hook"] + return get_manually_upgradable_versions()["upgrade-hook"] diff --git a/scripts/release/build/image_build_configuration.py b/scripts/release/build/image_build_configuration.py new file mode 100644 index 000000000..5eb497759 --- /dev/null +++ b/scripts/release/build/image_build_configuration.py @@ -0,0 +1,27 @@ +from dataclasses import dataclass +from typing import List, Optional + +from scripts.release.build.build_scenario import BuildScenario + +SUPPORTED_PLATFORMS = ["linux/amd64", "linux/arm64"] + + +@dataclass +class ImageBuildConfiguration: + scenario: BuildScenario + version: str + registry: str + + parallel: bool = False + parallel_factor: int = 0 + platforms: Optional[List[str]] = None + sign: bool = False + + def is_release_scenario(self) -> bool: + return self.scenario == BuildScenario.RELEASE + + def base_registry(self) -> str: + return self.registry.rpartition('/')[0] + + def image_name(self) -> str: + return self.registry.rpartition('/')[2] diff --git a/scripts/release/build_images.py b/scripts/release/build/image_build_process.py similarity index 87% rename from scripts/release/build_images.py rename to scripts/release/build/image_build_process.py index 8b9404eb8..48c283cc0 100644 --- a/scripts/release/build_images.py +++ b/scripts/release/build/image_build_process.py @@ -3,11 +3,11 @@ from typing import Dict import boto3 +import docker import python_on_whales from botocore.exceptions import BotoCoreError, ClientError from python_on_whales.exceptions import DockerException -import docker from lib.base_logger import logger DEFAULT_BUILDER_NAME = "multiarch" # Default buildx builder name @@ -48,17 +48,17 @@ def ensure_buildx_builder(builder_name: str = DEFAULT_BUILDER_NAME) -> str: :return: The builder name that was 
created or reused """ - docker = python_on_whales.docker + docker_cmd = python_on_whales.docker logger.info(f"Ensuring buildx builder '{builder_name}' exists...") - existing_builders = docker.buildx.list() + existing_builders = docker_cmd.buildx.list() if any(b.name == builder_name for b in existing_builders): logger.info(f"Builder '{builder_name}' already exists – reusing it.") - docker.buildx.use(builder_name) + docker_cmd.buildx.use(builder_name) return builder_name try: - docker.buildx.create( + docker_cmd.buildx.create( name=builder_name, driver="docker-container", use=True, @@ -73,13 +73,13 @@ def ensure_buildx_builder(builder_name: str = DEFAULT_BUILDER_NAME) -> str: def execute_docker_build( - tag: str, - dockerfile: str, - path: str, - args: Dict[str, str] = {}, - push: bool = True, - platforms: list[str] = None, - builder_name: str = DEFAULT_BUILDER_NAME, + tag: str, + dockerfile: str, + path: str, args: + Dict[str, str], + push: bool, + platforms: list[str], + builder_name: str = DEFAULT_BUILDER_NAME, ): """ Build a Docker image using python_on_whales and Docker Buildx for multi-architecture support. 
@@ -95,15 +95,11 @@ def execute_docker_build( # TODO CLOUDP-335471: use env variables to configure AWS region and account ID ecr_login_boto3(region="us-east-1", account_id="268558157000") - docker = python_on_whales.docker + docker_cmd = python_on_whales.docker try: # Convert build args to the format expected by python_on_whales - build_args = {k: str(v) for k, v in args.items()} if args else {} - - # Set default platforms if not specified - if platforms is None: - platforms = ["linux/amd64"] + build_args = {k: str(v) for k, v in args.items()} logger.info(f"Building image: {tag}") logger.info(f"Platforms: {platforms}") @@ -116,9 +112,10 @@ def execute_docker_build( logger.info(f"Multi-platform build for {len(platforms)} architectures") # Build the image using buildx, builder must be already initialized - docker.buildx.build( + docker_cmd.buildx.build( context_path=path, file=dockerfile, + # TODO: add tag for release builds (OLM immutable tag) tags=[tag], platforms=platforms, builder=builder_name, diff --git a/scripts/evergreen/release/images_signing.py b/scripts/release/build/image_signing.py similarity index 96% rename from scripts/evergreen/release/images_signing.py rename to scripts/release/build/image_signing.py index 9a5b50288..6bca81db7 100644 --- a/scripts/evergreen/release/images_signing.py +++ b/scripts/release/build/image_signing.py @@ -215,7 +215,7 @@ def sign_image(repository: str, tag: str) -> None: @TRACER.start_as_current_span("verify_signature") -def verify_signature(repository: str, tag: str) -> bool: +def verify_signature(repository: str, tag: str): start_time = time.time() span = trace.get_current_span() @@ -230,8 +230,7 @@ def verify_signature(repository: str, tag: str) -> bool: # Access the content of the file kubernetes_operator_public_key = r.text else: - logger.error(f"Failed to retrieve the public key from {public_key_url}: Status code {r.status_code}") - return False + raise Exception(f"Failed to retrieve the public key from 
{public_key_url}: Status code {r.status_code}") public_key_var_name = "OPERATOR_PUBLIC_KEY" additional_args = [ @@ -245,8 +244,7 @@ def verify_signature(repository: str, tag: str) -> bool: run_command_with_retries(command, retries=10) except subprocess.CalledProcessError as e: # Fail the pipeline if verification fails - logger.error(f"Failed to verify signature for image {image}: {e.stderr}") - raise + raise Exception(f"Failed to verify signature for image {image}") end_time = time.time() duration = end_time - start_time diff --git a/scripts/release/build_configuration.py b/scripts/release/build_configuration.py deleted file mode 100644 index 2228a6709..000000000 --- a/scripts/release/build_configuration.py +++ /dev/null @@ -1,19 +0,0 @@ -from dataclasses import dataclass -from typing import List, Optional - -from .build_context import BuildScenario - - -@dataclass -class BuildConfiguration: - scenario: BuildScenario - version: str - base_registry: str - - parallel: bool = False - parallel_factor: int = 0 - platforms: Optional[List[str]] = None - sign: bool = False - - def is_release_step_executed(self) -> bool: - return self.scenario == BuildScenario.RELEASE diff --git a/scripts/release/build_context.py b/scripts/release/build_context.py deleted file mode 100644 index d00d8de37..000000000 --- a/scripts/release/build_context.py +++ /dev/null @@ -1,89 +0,0 @@ -import os -from dataclasses import dataclass -from enum import Enum -from typing import Optional - -from lib.base_logger import logger - - -class BuildScenario(str, Enum): - """Represents the context in which the build is running.""" - - RELEASE = "release" # Official release triggered by a git tag - PATCH = "patch" # CI build for a patch/pull request - STAGING = "staging" # CI build from a merge to the master branch - DEVELOPMENT = "development" # Local build on a developer machine - - @classmethod - def infer_scenario_from_environment(cls) -> "BuildScenario": - """Infer the build scenario from environment 
variables.""" - git_tag = os.getenv("triggered_by_git_tag") - is_patch = os.getenv("is_patch", "false").lower() == "true" - is_evg = os.getenv("RUNNING_IN_EVG", "false").lower() == "true" - patch_id = os.getenv("version_id") - - if git_tag: - # Release scenario and the git tag will be used for promotion process only - scenario = BuildScenario.RELEASE - logger.info(f"Build scenario: {scenario} (git_tag: {git_tag})") - elif is_patch: - scenario = BuildScenario.PATCH - logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})") - elif is_evg: - scenario = BuildScenario.STAGING - logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})") - else: - scenario = BuildScenario.DEVELOPMENT - logger.info(f"Build scenario: {scenario}") - - return scenario - - -@dataclass -class BuildContext: - """Define build parameters based on the build scenario.""" - - scenario: BuildScenario - git_tag: Optional[str] = None - patch_id: Optional[str] = None - signing_enabled: bool = False - multi_arch: bool = True - version: Optional[str] = None - - @classmethod - def from_scenario(cls, scenario: BuildScenario) -> "BuildContext": - """Create build context from a given scenario.""" - git_tag = os.getenv("triggered_by_git_tag") - patch_id = os.getenv("version_id") - signing_enabled = scenario == BuildScenario.RELEASE - - return cls( - scenario=scenario, - git_tag=git_tag, - patch_id=patch_id, - signing_enabled=signing_enabled, - version=git_tag or patch_id, - ) - - def get_version(self) -> str: - """Gets the version that will be used to tag the images.""" - if self.scenario == BuildScenario.RELEASE: - return self.git_tag - if self.scenario == BuildScenario.STAGING: - # On master merges, always use "latest" (preserving legacy behavior) - return "latest" - if self.patch_id: - return self.patch_id - # Alternatively, we can fail here if no ID is explicitly defined - return "latest" - - def get_base_registry(self) -> str: - """Get the base registry URL for the current scenario.""" - # 
TODO CLOUDP-335471: when working on the promotion process, use the prod registry variable in RELEASE scenario - # TODO CLOUDP-335471: STAGING scenario should also push to STAGING_REPO_URL with version_id tag, - # in addition to the current ECR dev latest push (for backward compatibility) - # This will enable proper staging environment testing before production releases - - # For now, always use BASE_REPO_URL to preserve legacy behavior - # (STAGING pushes to ECR dev with "latest" tag) - return os.environ.get("BASE_REPO_URL") diff --git a/scripts/release/conftest.py b/scripts/release/conftest.py index 76410ba44..57199434e 100644 --- a/scripts/release/conftest.py +++ b/scripts/release/conftest.py @@ -1,8 +1,6 @@ -import json import os import shutil import tempfile -from typing import Dict from _pytest.fixtures import fixture from git import Repo @@ -169,9 +167,9 @@ def add_file(repo_path: str, src_file_path: str, dst_file_path: str | None = Non @fixture(scope="module") def readinessprobe_version() -> str: - return get_manually_upgradable_versions()["readinessprobe"] + return get_manually_upgradable_versions()["readiness-probe"] @fixture(scope="module") def operator_version_upgrade_post_start_hook_version() -> str: - return get_manually_upgradable_versions()["operator_version_upgrade_post_start_hook"] + return get_manually_upgradable_versions()["upgrade-hook"] diff --git a/scripts/release/constants.py b/scripts/release/constants.py index 694bba706..dc72cd7d9 100644 --- a/scripts/release/constants.py +++ b/scripts/release/constants.py @@ -14,3 +14,23 @@ def get_initial_version() -> str | None: def get_initial_commit_sha() -> str | None: return os.getenv(RELEASE_INITIAL_COMMIT_SHA_ENV_VAR) + + +def triggered_by_git_tag() -> str | None: + return os.getenv("triggered_by_git_tag") + + +def is_evg_patch() -> bool: + return os.getenv("is_patch", "false").lower() == "true" + + +def is_running_in_evg() -> bool: + return os.getenv("RUNNING_IN_EVG", "false").lower() == "true" + 
+ +def get_version_id() -> str | None: + """ + Get the version ID from the environment variable. This is typically used for patch builds in the Evergreen CI system. + :return: version_id (patch ID) or None if not set + """ + return os.getenv("version_id") diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py index e3b32aaaa..3e2ff736b 100644 --- a/scripts/release/pipeline_main.py +++ b/scripts/release/pipeline_main.py @@ -1,6 +1,5 @@ import argparse import os -import sys from typing import Callable, Dict from opentelemetry import context, trace @@ -19,8 +18,8 @@ from scripts.release.atomic_pipeline import ( build_agent_default_case, build_database_image, - build_init_appdb, - build_init_database, + build_init_appdb_image, + build_init_database_image, build_init_om_image, build_mco_tests_image, build_om_image, @@ -29,51 +28,122 @@ build_tests_image, build_upgrade_hook_image, ) -from scripts.release.build_configuration import BuildConfiguration -from scripts.release.build_context import ( - BuildContext, +from scripts.release.build.build_info import ( + AGENT_IMAGE, + DATABASE_IMAGE, + INIT_APPDB_IMAGE, + INIT_DATABASE_IMAGE, + INIT_OPS_MANAGER_IMAGE, + MCO_TESTS_IMAGE, + MEKO_TESTS_IMAGE, + OPERATOR_IMAGE, + OPS_MANAGER_IMAGE, + READINESS_PROBE_IMAGE, + UPGRADE_HOOK_IMAGE, + load_build_info, +) +from scripts.release.build.build_scenario import ( BuildScenario, ) -from scripts.release.build_images import DEFAULT_BUILDER_NAME, ensure_buildx_builder +from scripts.release.build.image_build_configuration import ( + SUPPORTED_PLATFORMS, + ImageBuildConfiguration, +) +from scripts.release.build.image_build_process import ( + DEFAULT_BUILDER_NAME, + ensure_buildx_builder, +) """ -The goal of main.py, build_configuration.py and build_context.py is to provide a single source of truth for the build +The goal of main.py, image_build_configuration.py and build_context.py is to provide a single source of truth for the build configuration. 
All parameters that depend on the the build environment (local dev, evg, etc) should be resolved here and not in the pipeline. """ -SUPPORTED_PLATFORMS = ["linux/amd64", "linux/arm64"] - def get_builder_function_for_image_name() -> Dict[str, Callable]: """Returns a dictionary of image names that can be built.""" image_builders = { - "test": build_tests_image, - "operator": build_operator_image, - "mco-test": build_mco_tests_image, - "readiness-probe": build_readiness_probe_image, - "upgrade-hook": build_upgrade_hook_image, - "database": build_database_image, - "agent": build_agent_default_case, - # + MEKO_TESTS_IMAGE: build_tests_image, + OPERATOR_IMAGE: build_operator_image, + MCO_TESTS_IMAGE: build_mco_tests_image, + READINESS_PROBE_IMAGE: build_readiness_probe_image, + UPGRADE_HOOK_IMAGE: build_upgrade_hook_image, + DATABASE_IMAGE: build_database_image, + AGENT_IMAGE: build_agent_default_case, # Init images - "init-appdb": build_init_appdb, - "init-database": build_init_database, - "init-ops-manager": build_init_om_image, - # + INIT_APPDB_IMAGE: build_init_appdb_image, + INIT_DATABASE_IMAGE: build_init_database_image, + INIT_OPS_MANAGER_IMAGE: build_init_om_image, # Ops Manager image - "ops-manager": build_om_image, + OPS_MANAGER_IMAGE: build_om_image, } return image_builders -def build_image(image_name: str, build_configuration: BuildConfiguration): +def build_image(image_name: str, build_configuration: ImageBuildConfiguration): """Builds one of the supported images by its name.""" + if image_name not in get_builder_function_for_image_name(): + raise ValueError( + f"Image '{image_name}' is not supported. 
Supported images: {', '.join(get_builder_function_for_image_name().keys())}" + ) get_builder_function_for_image_name()[image_name](build_configuration) +def image_build_config_from_args(args) -> ImageBuildConfiguration: + image = args.image + + build_scenario = get_scenario_from_arg(args.scenario) or BuildScenario.infer_scenario_from_environment() + + build_info = load_build_info(build_scenario) + logger.info(f"image is {image}") + logger.info(f"images are {build_info.images}") + image_build_info = build_info.images.get(image) + logger.info(f"image_build_info is {image_build_info}") + if not image_build_info: + raise ValueError(f"Image '{image}' is not defined in the build info for scenario '{build_scenario}'") + + # Resolve final values with overrides + version = args.version or image_build_info.version + registry = args.registry or image_build_info.repository + platforms = get_platforms_from_arg(args.platform) or image_build_info.platforms + sign = args.sign or image_build_info.sign + + return ImageBuildConfiguration( + scenario=build_scenario, + version=version, + registry=registry, + parallel=args.parallel, + platforms=platforms, + sign=sign, + parallel_factor=args.parallel_factor, + ) + + +def get_scenario_from_arg(args_scenario: str) -> BuildScenario | None: + if not args_scenario: + return None + + try: + return BuildScenario(args_scenario) + except ValueError as e: + raise ValueError(f"Invalid scenario '{args_scenario}': {e}") + + +def get_platforms_from_arg(args_platforms: str) -> list[str] | None: + if not args_platforms: + return None + + platforms = [p.strip() for p in args_platforms.split(",")] + if any(p not in SUPPORTED_PLATFORMS for p in platforms): + raise ValueError( + f"Unsupported platform in --platforms '{args_platforms}'. 
Supported platforms: {', '.join(SUPPORTED_PLATFORMS)}" + ) + return platforms + + def _setup_tracing(): trace_id = os.environ.get("otel_trace_id") parent_id = os.environ.get("otel_parent_id") @@ -106,12 +176,10 @@ def _setup_tracing(): def main(): - _setup_tracing() parser = argparse.ArgumentParser(description="Build container images.") parser.add_argument("image", help="Image to build.") # Required parser.add_argument("--parallel", action="store_true", help="Build images in parallel.") - parser.add_argument("--debug", action="store_true", help="Enable debug logging.") parser.add_argument("--sign", action="store_true", help="Sign images.") parser.add_argument( "--scenario", @@ -121,8 +189,7 @@ def main(): # Override arguments for build context and configuration parser.add_argument( "--platform", - default="linux/amd64", - help="Target platforms for multi-arch builds (comma-separated). Example: linux/amd64,linux/arm64. Defaults to linux/amd64.", + help="Override the platforms instead of resolving from build scenario. Multi-arch builds are comma-separated. Example: linux/amd64,linux/arm64", ) parser.add_argument( "--version", @@ -142,7 +209,7 @@ def main(): args = parser.parse_args() - build_config = build_config_from_args(args) + build_config = image_build_config_from_args(args) logger.info(f"Building image: {args.image}") logger.info(f"Build configuration: {build_config}") @@ -153,41 +220,5 @@ def main(): build_image(args.image, build_config) -def build_config_from_args(args): - # Validate that the image name is supported - supported_images = get_builder_function_for_image_name().keys() - if args.image not in supported_images: - logger.error(f"Unsupported image '{args.image}'. Supported images: {', '.join(supported_images)}") - sys.exit(1) - - # Parse platform argument (comma-separated) - platforms = [p.strip() for p in args.platform.split(",")] - if any(p not in SUPPORTED_PLATFORMS for p in platforms): - logger.error( - f"Unsupported platform in '{args.platform}'. 
Supported platforms: {', '.join(SUPPORTED_PLATFORMS)}" - ) - sys.exit(1) - - # Centralized configuration management with overrides - build_scenario = args.scenario or BuildScenario.infer_scenario_from_environment() - build_context = BuildContext.from_scenario(build_scenario) - - # Resolve final values with overrides - scenario = args.scenario or build_context.scenario - version = args.version or build_context.get_version() - registry = args.registry or build_context.get_base_registry() - sign = args.sign or build_context.signing_enabled - - return BuildConfiguration( - scenario=scenario, - version=version, - base_registry=registry, - parallel=args.parallel, - platforms=platforms, - sign=sign, - parallel_factor=args.parallel_factor, - ) - - if __name__ == "__main__": main() diff --git a/scripts/release/release_info.py b/scripts/release/release_info.py index 40fc7f3bc..201f4cec9 100644 --- a/scripts/release/release_info.py +++ b/scripts/release/release_info.py @@ -2,7 +2,17 @@ import json import pathlib -from scripts.release.build.build_info import load_build_info +from scripts.release.build.build_info import ( + DATABASE_IMAGE, + INIT_APPDB_IMAGE, + INIT_DATABASE_IMAGE, + INIT_OPS_MANAGER_IMAGE, + OPERATOR_IMAGE, + READINESS_PROBE_IMAGE, + UPGRADE_HOOK_IMAGE, + BuildInfo, + load_build_info, +) from scripts.release.build.build_scenario import BuildScenario from scripts.release.constants import ( DEFAULT_CHANGELOG_PATH, @@ -10,6 +20,16 @@ DEFAULT_REPOSITORY_PATH, ) +RELEASE_INFO_IMAGES_ORDERED = [ + OPERATOR_IMAGE, + INIT_DATABASE_IMAGE, + INIT_APPDB_IMAGE, + INIT_OPS_MANAGER_IMAGE, + DATABASE_IMAGE, + READINESS_PROBE_IMAGE, + UPGRADE_HOOK_IMAGE, +] + def create_release_info_json( repository_path: str, changelog_sub_path: str, initial_commit_sha: str = None, initial_version: str = None @@ -22,7 +42,40 @@ def create_release_info_json( initial_version=initial_version, ) - return json.dumps(build_info.to_json(), indent=2) + release_info_json = 
convert_to_release_info_json(build_info) + + return json.dumps(release_info_json, indent=2) + + +def convert_to_release_info_json(build_info: BuildInfo) -> dict: + output = { + "images": {}, + "binaries": {}, + "helm-charts": {}, + } + # Filter (and order) images to include only those relevant for release info + images = {name: build_info.images[name] for name in RELEASE_INFO_IMAGES_ORDERED} + + for name, image in images.items(): + output["images"][name] = { + "repository": image.repository, + "platforms": image.platforms, + "version": image.version, + } + + for name, binary in build_info.binaries.items(): + output["binaries"][name] = { + "platforms": binary.platforms, + "version": binary.version, + } + + for name, chart in build_info.helm_charts.items(): + output["helm-charts"][name] = { + "repository": chart.repository, + "version": chart.version, + } + + return output if __name__ == "__main__": diff --git a/scripts/release/release_info_test.py b/scripts/release/release_info_test.py index 2f820037a..213f5d8e6 100644 --- a/scripts/release/release_info_test.py +++ b/scripts/release/release_info_test.py @@ -13,22 +13,22 @@ def test_create_release_info_json( expected_json = { "images": { - "mongodbOperator": { + "operator": { "repository": "quay.io/mongodb/mongodb-kubernetes", "platforms": ["linux/arm64", "linux/amd64"], "version": "1.2.0", }, - "initDatabase": { + "init-database": { "repository": "quay.io/mongodb/mongodb-kubernetes-init-database", "platforms": ["linux/arm64", "linux/amd64"], "version": "1.2.0", }, - "initAppDb": { + "init-appdb": { "repository": "quay.io/mongodb/mongodb-kubernetes-init-appdb", "platforms": ["linux/arm64", "linux/amd64"], "version": "1.2.0", }, - "initOpsManager": { + "init-ops-manager": { "repository": "quay.io/mongodb/mongodb-kubernetes-init-ops-manager", "platforms": ["linux/arm64", "linux/amd64"], "version": "1.2.0", @@ -38,12 +38,12 @@ def test_create_release_info_json( "platforms": ["linux/arm64", "linux/amd64"], "version": 
"1.2.0", }, - "readinessprobe": { + "readiness-probe": { "repository": "quay.io/mongodb/mongodb-kubernetes-readinessprobe", "platforms": ["linux/arm64", "linux/amd64"], "version": readinessprobe_version, }, - "operator-version-upgrade-post-start-hook": { + "upgrade-hook": { "repository": "quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook", "platforms": ["linux/arm64", "linux/amd64"], "version": operator_version_upgrade_post_start_hook_version,