diff --git a/.evergreen-functions.yml b/.evergreen-functions.yml
index 5c7636210..e59918a8d 100644
--- a/.evergreen-functions.yml
+++ b/.evergreen-functions.yml
@@ -46,6 +46,9 @@ variables:
       - workdir
       # temporary secret to pull community private preview image from quay.io
       - community_private_preview_pullsecret_dockerconfigjson
+      - RELEASE_INITIAL_VERSION
+      - RELEASE_INITIAL_COMMIT_SHA
+      - OVERRIDE_OPERATOR_VERSION

 functions:

@@ -564,34 +567,8 @@ functions:
         working_dir: src/github.com/mongodb/mongodb-kubernetes
         binary: scripts/dev/run_python.sh scripts/release/pipeline_main.py --parallel ${image_name} ${all_agents} ${build_scenario}

-  # TODO: CLOUDP-335471 ; once all image builds are made with the new atomic pipeline, remove the following function
-  legacy_pipeline:
+  release_operator_pipeline:
     - *switch_context
-    - command: shell.exec
-      type: setup
-      params:
-        shell: bash
-        script: |
-          # Docker Hub workaround
-          # docker buildx needs the moby/buildkit image when setting up a builder so we pull it from our mirror
-          docker buildx create --driver=docker-container --driver-opt=image=268558157000.dkr.ecr.eu-west-1.amazonaws.com/docker-hub-mirrors/moby/buildkit:buildx-stable-1 --use
-          docker buildx inspect --bootstrap
-    - command: ec2.assume_role
-      display_name: Assume IAM role with permissions to pull Kondukto API token
-      params:
-        role_arn: ${kondukto_role_arn}
-    - command: shell.exec
-      display_name: Pull Kondukto API token from AWS Secrets Manager and write it to file
-      params:
-        silent: true
-        shell: bash
-        include_expansions_in_env: [AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_SESSION_TOKEN]
-        script: |
-          set -e
-          # use AWS CLI to get the Kondukto API token from AWS Secrets Manager
-          kondukto_token=$(aws secretsmanager get-secret-value --secret-id "kondukto-token" --region "us-east-1" --query 'SecretString' --output text)
-          # write the KONDUKTO_TOKEN environment variable to Silkbomb environment file
-          echo "KONDUKTO_TOKEN=$kondukto_token" > ${workdir}/silkbomb.env
     - command: subprocess.exec
       retry_on_failure: true
       type: setup
@@ -599,7 +576,8 @@ functions:
         shell: bash
         <<: *e2e_include_expansions_in_env
         working_dir: src/github.com/mongodb/mongodb-kubernetes
-        binary: scripts/dev/run_python.sh pipeline.py --include ${image_name} --parallel --sign
+        # By default, use the git tag that triggered the task; it can be overridden with OVERRIDE_OPERATOR_VERSION
+        binary: scripts/dev/run_python.sh scripts/release/pipeline_main.py ${image_name} --build-scenario release --version ${OVERRIDE_OPERATOR_VERSION|*triggered_by_git_tag}

   teardown_cloud_qa_all:
     - *switch_context

@@ -845,3 +823,65 @@ functions:
           - task_name
         script: |
           ./scripts/code_snippets/${task_name}_test.sh
+
+  #
+  # kubectl mongodb plugin release functions
+  #
+  install_goreleaser:
+    - command: shell.exec
+      type: setup
+      include_expansions_in_env:
+        - goreleaser_pro_tar_gz
+      params:
+        script: |
+          set -Eeuo pipefail
+          curl -fL "${goreleaser_pro_tar_gz}" --output goreleaser_Linux_x86_64.tar.gz
+          tar -xf goreleaser_Linux_x86_64.tar.gz
+          chmod 755 ./goreleaser
+
+  install_macos_notarization_service:
+    - command: shell.exec
+      type: setup
+      params:
+        include_expansions_in_env:
+          - notary_service_url
+        script: |
+          set -Eeuo pipefail
+
+          curl "${notary_service_url}" --output macos-notary.zip
+          unzip -u macos-notary.zip
+          chmod 755 ./linux_amd64/macnotary
+
+  release_kubectl_mongodb_plugin:
+    - command: github.generate_token
+      params:
+        expansion_name: generated_token
+    - command: shell.exec
+      type: setup
+      params:
+        working_dir: src/github.com/mongodb/mongodb-kubernetes
+        include_expansions_in_env:
+          - GRS_USERNAME
+          - GRS_PASSWORD
+          - PKCS11_URI
+          - ARTIFACTORY_URL
+          - ARTIFACTORY_PASSWORD
+          - SIGNING_IMAGE_URI
+          - macos_notary_keyid
+          - macos_notary_secret
+          - workdir
+          - triggered_by_git_tag
+        env:
+          XDG_CONFIG_HOME: ${go_base_path}${workdir}
+          GO111MODULE: "on"
+          GOROOT: "/opt/golang/go1.24"
+          MACOS_NOTARY_KEY: ${macos_notary_keyid}
+          MACOS_NOTARY_SECRET: ${macos_notary_secret}
+          GORELEASER_CURRENT_TAG: ${triggered_by_git_tag}
+        # shell.exec EVG Task doesn't have add_to_path, so we need to explicitly add the path export below.
+        script: |
+          set -Eeuo pipefail
+
+          export PATH=$GOROOT/bin:$PATH
+          export GITHUB_TOKEN=${generated_token}
+          ${workdir}/goreleaser release --clean
diff --git a/.evergreen-kubectlplugin.yml b/.evergreen-kubectlplugin.yml
deleted file mode 100644
index 72129009a..000000000
--- a/.evergreen-kubectlplugin.yml
+++ /dev/null
@@ -1,99 +0,0 @@
-variables:
-  - &go_env
-    XDG_CONFIG_HOME: ${go_base_path}${workdir}
-    GO111MODULE: "on"
-    GOROOT: "/opt/golang/go1.24"
-functions:
-  "clone":
-    - command: subprocess.exec
-      type: setup
-      params:
-        command: "mkdir -p src/github.com/mongodb"
-    - command: git.get_project
-      type: setup
-      params:
-        directory: src/github.com/mongodb/mongodb-kubernetes
-
-  "install goreleaser":
-    - command: shell.exec
-      type: setup
-      include_expansions_in_env:
-        - goreleaser_pro_tar_gz
-      params:
-        script: |
-          set -Eeu pipefail
-          curl -fL "${goreleaser_pro_tar_gz}" --output goreleaser_Linux_x86_64.tar.gz
-          tar -xf goreleaser_Linux_x86_64.tar.gz
-          chmod 755 ./goreleaser
-
-  "install macos notarization service":
-    - command: shell.exec
-      type: setup
-      params:
-        include_expansions_in_env:
-          - notary_service_url
-        script: |
-          set -Eeu pipefail
-
-          curl "${notary_service_url}" --output macos-notary.zip
-          unzip -u macos-notary.zip
-          chmod 755 ./linux_amd64/macnotary
-  "release":
-    - command: github.generate_token
-      params:
-        expansion_name: generated_token
-    - command: shell.exec
-      type: setup
-      params:
-        working_dir: src/github.com/mongodb/mongodb-kubernetes
-        include_expansions_in_env:
-          - GRS_USERNAME
-          - GRS_PASSWORD
-          - PKCS11_URI
-          - ARTIFACTORY_URL
-          - ARTIFACTORY_PASSWORD
-          - SIGNING_IMAGE_URI
-          - macos_notary_keyid
-          - macos_notary_secret
-          - workdir
-          - triggered_by_git_tag
-        env:
-          <<: *go_env
-          MACOS_NOTARY_KEY: ${macos_notary_keyid}
-          MACOS_NOTARY_SECRET: ${macos_notary_secret}
-          # TODO: CLOUDP-318631 triggered_by_git_tag is not set, because we run the task manually
-          GORELEASER_CURRENT_TAG: ${triggered_by_git_tag}
-        # shell.exec EVG Task doesn't have add_to_path, so we need to explicitly add the path export below.
-        script: |
-          set -Eeu pipefail
-
-          export PATH=$GOROOT/bin:$PATH
-          export GITHUB_TOKEN=${generated_token}
-          ${workdir}/goreleaser release --clean
-
-tasks:
-  - name: package_goreleaser
-    allowed_requesters: ["patch", "github_tag"]
-    tags: ["packaging"]
-    commands:
-      - func: "clone"
-      - func: "install goreleaser"
-      - func: "install macos notarization service"
-      - func: "release"
-  # add a noop task because if the only task in a variant is git_tag_only: true Evergreen doesn't start it at all
-  - name: noop
-    commands:
-      - command: shell.exec
-        params:
-          shell: bash
-          script: echo "this is the noop task"
-
-buildvariants:
-  # This variant is run when a new tag is out similar to github actions.
-  - name: release_mcli
-    display_name: Release Go multi-cluster binary
-    run_on:
-      - ubuntu2204-small
-    tasks:
-      - name: package_goreleaser
-      - name: noop
diff --git a/.evergreen-release.yml b/.evergreen-release.yml
new file mode 100644
index 000000000..3c3ee8b3d
--- /dev/null
+++ b/.evergreen-release.yml
@@ -0,0 +1,186 @@
+include:
+  - filename: .evergreen-functions.yml
+
+tasks:
+
+  - name: release_operator
+    tags: [ "image_release" ]
+    allowed_requesters: [ "patch", "github_tag" ]
+    commands:
+      - func: clone
+      - func: setup_building_host
+      - func: quay_login
+      - func: release_operator_pipeline
+        vars:
+          image_name: operator
+
+  # Releases init images to Quay
+  - name: release_init_appdb
+    tags: [ "image_release" ]
+    allowed_requesters: [ "patch", "github_tag" ]
+    commands:
+      - func: clone
+      - func: setup_building_host
+      - func: quay_login
+      - func: release_operator_pipeline
+        vars:
+          image_name: init-appdb
+
+  - name: release_init_database
+    tags: [ "image_release" ]
+    allowed_requesters: [ "patch", "github_tag" ]
+    commands:
+      - func: clone
+      - func: setup_building_host
+      - func: quay_login
+      - func: release_operator_pipeline
+        vars:
+          image_name: init-database
+
+  - name: release_init_ops_manager
+    tags: [ "image_release" ]
+    allowed_requesters: [ "patch", "github_tag" ]
+    commands:
+      - func: clone
+      - func: setup_building_host
+      - func: quay_login
+      - func: release_operator_pipeline
+        vars:
+          image_name: init-ops-manager
+
+  - name: release_database
+    tags: [ "image_release" ]
+    allowed_requesters: [ "patch", "github_tag" ]
+    commands:
+      - func: clone
+      - func: setup_building_host
+      - func: quay_login
+      - func: release_operator_pipeline
+        vars:
+          image_name: database
+
+  - name: prepare_and_upload_openshift_bundles
+    tags: [ "openshift_bundles" ]
+    allowed_requesters: [ "patch", "github_tag" ]
+    commands:
+      - func: clone
+      - func: setup_aws
+      - func: configure_docker_auth
+      - func: setup_prepare_openshift_bundles
+      - func: prepare_openshift_bundles
+      - func: update_evergreen_expansions
+      - func: upload_openshift_bundle
+        vars:
+          # the mongodbOperator expansion is added by the update_evergreen_expansions func from release.json
+          bundle_file_name: "mck-operator-certified-${mongodbOperator}.tgz"
+
+  - name: run_conditionally_prepare_and_upload_openshift_bundles
+    tags: [ "openshift_bundles" ]
+    allowed_requesters: [ "patch", "github_tag" ]
+    commands:
+      - func: clone
+      - func: run_task_conditionally
+        vars:
+          condition_script: scripts/evergreen/should_prepare_openshift_bundles.sh
+          variant: prepare_openshift_bundles
+          task: prepare_and_upload_openshift_bundles
+
+  - name: release_kubectl_mongodb_plugin
+    allowed_requesters: [ "patch", "github_tag" ]
+    tags: [ "binary_release" ]
+    commands:
+      - func: clone
+      - func: install_goreleaser
+      - func: install_macos_notarization_service
+      - func: release_kubectl_mongodb_plugin
+
+### Release build variants
+buildvariants:
+
+  - name: release_images
+    display_name: release_images
+    tags: [ "release" ]
+    allowed_requesters: [ "patch", "github_tag" ]
+    max_hosts: -1
+    run_on:
+      - release-ubuntu2204-large # This is required for CISA attestation https://jira.mongodb.org/browse/DEVPROD-17780
+    tasks:
+      - name: release_operator
+      - name: release_init_appdb
+      - name: release_init_database
+      - name: release_init_ops_manager
+      - name: release_database
+
+  - name: preflight_release_images
+    display_name: preflight_release_images
+    tags: [ "release" ]
+    allowed_requesters: [ "patch", "github_tag" ]
+    depends_on:
+      - name: "*"
+        variant: release_images
+    run_on:
+      -
rhel90-large + expansions: + preflight_submit: true + tasks: + - name: preflight_images_task_group + + - name: prepare_openshift_bundles + display_name: prepare_openshift_bundles + tags: [ "release" ] + allowed_requesters: [ "patch", "github_tag" ] + depends_on: + - name: "*" + variant: release_images + - name: "*" + variant: preflight_release_images + run_on: + - ubuntu2204-large + tasks: + - name: run_conditionally_prepare_and_upload_openshift_bundles + + - name: prerelease_gke_code_snippets + display_name: prerelease_gke_code_snippets + tags: [ "release" ] + allowed_requesters: ["patch", "github_tag"] + depends_on: + - variant: release_images + name: '*' + patch_optional: true + run_on: + - ubuntu2204-small + tasks: + - name: gke_code_snippets_task_group + + - name: e2e_smoke + display_name: e2e_smoke + tags: [ "e2e_smoke_release_test_suite" ] + run_on: + - ubuntu2204-large + allowed_requesters: [ "patch", "github_tag" ] + depends_on: + - name: "*" + variant: release_images + tasks: + - name: e2e_smoke_task_group + + - name: e2e_static_smoke + display_name: e2e_static_smoke + tags: [ "e2e_smoke_release_test_suite" ] + run_on: + - ubuntu2204-large + allowed_requesters: [ "patch", "github_tag" ] + depends_on: + - name: "*" + variant: release_images + tasks: + - name: e2e_smoke_task_group + + - name: release_kubectl_mongodb_plugin + display_name: release_kubectl_mongodb_plugin + tags: [ "release" ] + run_on: + - release-ubuntu2204-small # This is required for CISA attestation https://jira.mongodb.org/browse/DEVPROD-17780 + allowed_requesters: [ "patch", "github_tag" ] + tasks: + - name: release_kubectl_mongodb_plugin diff --git a/.evergreen.yml b/.evergreen.yml index b1ab8a9bf..62473a279 100644 --- a/.evergreen.yml +++ b/.evergreen.yml @@ -5,6 +5,7 @@ include: - filename: .evergreen-functions.yml - filename: .evergreen-tasks.yml - filename: .evergreen-mco.yml + - filename: .evergreen-release.yml variables: - &ops_manager_60_latest 6.0.27 # The order/index is important, since these are anchors. 
Please do not change @@ -231,7 +232,7 @@ patch_aliases: task: ".*" - alias: "release" variant_tags: [ "release", "e2e_smoke_release_test_suite" ] - task_tags: [ "image_release", "image_preflight", "openshift_bundles", "code_snippets", "patch-run" ] + task_tags: [ "image_release", "binary_release", "image_preflight", "openshift_bundles", "code_snippets", "patch-run" ] - alias: "smoke_test_release" variant_tags: [ "e2e_smoke_release_test_suite" ] task_tags: [ "patch-run" ] @@ -262,7 +263,7 @@ github_checks_aliases: git_tag_aliases: - git_tag: "^(\\d+\\.)?(\\d+\\.)?(\\d+)$" variant_tags: [ "release", "e2e_smoke_release_test_suite" ] - task_tags: [ "image_release", "image_preflight", "openshift_bundles", "code_snippets", "patch-run" ] + task_tags: [ "image_release", "image_preflight", "binary_release", "openshift_bundles", "code_snippets", "patch-run" ] tasks: - name: unit_tests_golang @@ -289,59 +290,6 @@ tasks: commands: - func: lint_repo - - name: release_operator - tags: [ "image_release" ] - allowed_requesters: [ "patch", "github_tag" ] - commands: - - func: clone - - func: setup_building_host - - func: quay_login - - func: setup_docker_sbom - - func: legacy_pipeline - vars: - image_name: operator - include_tags: release - - # Releases init images to Quay - - name: release_init_appdb - tags: [ "image_release" ] - allowed_requesters: [ "patch", "github_tag" ] - commands: - - func: clone - - func: setup_building_host - - func: quay_login - - func: setup_docker_sbom - - func: legacy_pipeline - vars: - image_name: init-appdb - include_tags: release - - - name: release_init_database - tags: [ "image_release" ] - allowed_requesters: [ "patch", "github_tag" ] - commands: - - func: clone - - func: setup_building_host - - func: quay_login - - func: setup_docker_sbom - - func: legacy_pipeline - vars: - image_name: init-database - include_tags: release - - - name: release_init_ops_manager - tags: [ "image_release" ] - allowed_requesters: [ "patch", "github_tag" ] - commands: - - func: clone - - func: setup_building_host - - func: quay_login - - func: setup_docker_sbom - - func: legacy_pipeline - vars: - image_name: init-ops-manager - include_tags: release - # pct only triggers this variant once a new agent image is out - name: release_agent # this enables us to run this variant either manually (patch) which pct does or during an OM bump (github_pr) @@ -548,18 +496,6 @@ tasks: variant: e2e_operator_perf_thirty size: small - - name: release_database - tags: [ "image_release" ] - allowed_requesters: [ "patch", "github_tag" ] - commands: - - func: clone - - func: setup_building_host - - func: quay_login - - func: setup_docker_sbom - - func: legacy_pipeline - vars: - image_name: database - - name: build_om_images commands: - func: clone @@ -567,7 +503,6 @@ tasks: - func: pipeline vars: image_name: ops-manager - skip_tags: release - name: publish_ops_manager commands: @@ -588,30 +523,6 @@ tasks: - func: setup_prepare_openshift_bundles - func: prepare_openshift_bundles_for_e2e - - name: prepare_and_upload_openshift_bundles - tags: [ "openshift_bundles" ] - commands: - - func: clone - - func: setup_aws - - func: configure_docker_auth - - func: setup_prepare_openshift_bundles - - func: prepare_openshift_bundles - - func: update_evergreen_expansions - - func: upload_openshift_bundle - vars: - # mongoDbOperator expansion is added in update_evergreen_expansions func from release.json - bundle_file_name: "mck-operator-certified-${mongodbOperator}.tgz" - - - name: 
run_conditionally_prepare_and_upload_openshift_bundles - tags: [ "openshift_bundles" ] - commands: - - func: clone - - func: run_task_conditionally - vars: - condition_script: scripts/evergreen/should_prepare_openshift_bundles.sh - variant: prepare_openshift_bundles - task: prepare_and_upload_openshift_bundles - - name: backup_csv_images_dry_run commands: - func: clone @@ -1457,25 +1368,13 @@ buildvariants: tasks: - name: e2e_operator_race_with_telemetry_task_group - - name: e2e_smoke - display_name: e2e_smoke - tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] - run_on: - - ubuntu2204-large - allowed_requesters: [ "patch", "github_tag" ] - depends_on: - - name: build_test_image - variant: init_test_run - tasks: - - name: e2e_smoke_task_group - - name: e2e_smoke_ibm_power display_name: e2e_smoke_ibm_power - tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] + tags: [ "e2e_test_suite" ] run_on: - rhel9-power-small - rhel9-power-large - allowed_requesters: [ "patch", "github_tag" , "commit"] + allowed_requesters: [ "patch", "commit"] depends_on: - name: build_operator_ubi variant: init_test_run @@ -1492,13 +1391,13 @@ buildvariants: tasks: - name: e2e_smoke_ibm_task_group - - name: e2e_smoke_ibm_z - display_name: e2e_smoke_ibm_z - tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] + - name: e2e_static_smoke_ibm_power + display_name: e2e_static_smoke_ibm_power + tags: [ "e2e_test_suite", "static" ] run_on: - - rhel9-zseries-small - - rhel9-zseries-large - allowed_requesters: [ "patch", "github_tag", "commit"] + - rhel9-power-small + - rhel9-power-large + allowed_requesters: [ "patch", "commit"] depends_on: - name: build_operator_ubi variant: init_test_run @@ -1511,37 +1410,17 @@ buildvariants: - name: build_init_om_images_ubi variant: init_test_run - name: build_test_image_ibm - variant: init_test_run_ibm_z + variant: init_test_run_ibm_power tasks: - name: e2e_smoke_ibm_task_group - - name: e2e_smoke_arm - display_name: e2e_smoke_arm - tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite" ] - run_on: - - ubuntu2204-arm64-large - allowed_requesters: [ "patch", "github_tag", "commit"] - <<: *base_no_om_image_dependency - tasks: - - name: e2e_smoke_arm_task_group - - - name: e2e_static_smoke_arm - display_name: e2e_smoke_arm - tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite", "static" ] - run_on: - - ubuntu2204-arm64-large - allowed_requesters: [ "patch", "github_tag", "commit"] - <<: *base_no_om_image_dependency - tasks: - - name: e2e_smoke_arm_task_group - - - name: e2e_static_smoke_ibm_z - display_name: e2e_static_smoke_ibm_z - tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite", "static" ] + - name: e2e_smoke_ibm_z + display_name: e2e_smoke_ibm_z + tags: [ "e2e_test_suite" ] run_on: - rhel9-zseries-small - rhel9-zseries-large - allowed_requesters: [ "patch", "github_tag", "commit"] + allowed_requesters: [ "patch", "commit"] depends_on: - name: build_operator_ubi variant: init_test_run @@ -1558,13 +1437,13 @@ buildvariants: tasks: - name: e2e_smoke_ibm_task_group - - name: e2e_static_smoke_ibm_power - display_name: e2e_static_smoke_ibm_power - tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite", "static" ] + - name: e2e_static_smoke_ibm_z + display_name: e2e_static_smoke_ibm_z + tags: [ "e2e_test_suite", "static" ] run_on: - - rhel9-power-small - - rhel9-power-large - allowed_requesters: [ "patch", "github_tag", "commit"] + - rhel9-zseries-small + - rhel9-zseries-large + allowed_requesters: [ "patch", "commit"] depends_on: - name: 
build_operator_ubi
         variant: init_test_run
@@ -1577,21 +1456,29 @@ buildvariants:
       - name: build_init_om_images_ubi
         variant: init_test_run
       - name: build_test_image_ibm
-        variant: init_test_run_ibm_power
+        variant: init_test_run_ibm_z
     tasks:
       - name: e2e_smoke_ibm_task_group

-  - name: e2e_static_smoke
-    display_name: e2e_static_smoke
-    tags: [ "e2e_test_suite", "e2e_smoke_release_test_suite", "static" ]
+  - name: e2e_smoke_arm
+    display_name: e2e_smoke_arm
+    tags: [ "e2e_test_suite" ]
     run_on:
-      - ubuntu2204-large
-    allowed_requesters: [ "patch", "github_tag" ]
-    depends_on:
-      - name: build_test_image
-        variant: init_test_run
+      - ubuntu2204-arm64-large
+    allowed_requesters: [ "patch", "commit"]
+    <<: *base_no_om_image_dependency
+    tasks:
+      - name: e2e_smoke_arm_task_group
+
+  - name: e2e_static_smoke_arm
+    display_name: e2e_static_smoke_arm
+    tags: [ "e2e_test_suite", "static" ]
+    run_on:
+      - ubuntu2204-arm64-large
+    allowed_requesters: [ "patch", "commit"]
+    <<: *base_no_om_image_dependency
     tasks:
-      - name: e2e_smoke_task_group
+      - name: e2e_smoke_arm_task_group

   - name: e2e_multi_cluster_kind
     display_name: e2e_multi_cluster_kind
@@ -1898,62 +1785,6 @@ buildvariants:
     tasks:
       - name: preflight_om_image

-  ### Release build variants
-
-  ## Adds versions as supported in the supported versions Database.
-  - name: release_images
-    display_name: release_images
-    tags: [ "release" ]
-    allowed_requesters: [ "patch", "github_tag" ]
-    max_hosts: -1
-    run_on:
-      - release-ubuntu2204-large # This is required for CISA attestation https://jira.mongodb.org/browse/DEVPROD-17780
-    depends_on:
-      - name: build_operator_ubi
-        variant: init_test_run
-      - name: build_init_om_images_ubi
-        variant: init_test_run
-      - name: build_init_appdb_images_ubi
-        variant: init_test_run
-      - name: build_init_database_image_ubi
-        variant: init_test_run
-      - name: build_database_image_ubi
-        variant: init_test_run
-    tasks:
-      - name: release_operator
-      - name: release_init_appdb
-      - name: release_init_database
-      - name: release_init_ops_manager
-      - name: release_database
-
-  - name: preflight_release_images
-    display_name: preflight_release_images
-    tags: [ "release" ]
-    allowed_requesters: [ "patch", "github_tag" ]
-    depends_on:
-      - name: "*"
-        variant: release_images
-    run_on:
-      - rhel90-large
-    expansions:
-      preflight_submit: true
-    tasks:
-      - name: preflight_images_task_group
-
-  - name: prepare_openshift_bundles
-    display_name: prepare_openshift_bundles
-    tags: [ "release" ]
-    allowed_requesters: [ "patch", "github_tag" ]
-    depends_on:
-      - name: "*"
-        variant: release_images
-      - name: "*"
-        variant: preflight_release_images
-    run_on:
-      - ubuntu2204-large
-    tasks:
-      - name: run_conditionally_prepare_and_upload_openshift_bundles
-
   # It will be called by pct while bumping the agent cloud manager image
   - name: release_agent
     display_name: release_agent
@@ -2005,19 +1836,6 @@ buildvariants:
     tasks:
       - name: gke_code_snippets_task_group

-  - name: prerelease_gke_code_snippets
-    display_name: prerelease_gke_code_snippets
-    tags: [ "release" ]
-    allowed_requesters: ["patch", "github_tag"]
-    depends_on:
-      - variant: release_images
-        name: '*'
-        patch_optional: true
-    run_on:
-      - ubuntu2204-small
-    tasks:
-      - name: gke_code_snippets_task_group
-
   - name: private_gke_code_snippets
     display_name: private_gke_code_snippets
     allowed_requesters: ["patch"]
diff --git a/.github/workflows/preview_release_notes.yml b/.github/workflows/preview_release_notes.yml
index 316c3176a..ddf73d690 100644
--- a/.github/workflows/preview_release_notes.yml
+++
b/.github/workflows/preview_release_notes.yml @@ -31,12 +31,12 @@ jobs: with: python-version: ${{ env.PYTHON_VERSION }} - name: Generate Release Notes - run: python -m scripts.release.release_notes -s $INITIAL_COMMIT_SHA -v $INITIAL_VERSION -o release_notes_tmp.md + run: python -m scripts.release.release_notes -s $RELEASE_INITIAL_COMMIT_SHA -v $RELEASE_INITIAL_VERSION -o release_notes_tmp.md env: # We can not use environments set via GitHub UI because they will # not be available in the pull requests running from forks. - INITIAL_COMMIT_SHA: ${{ env.INITIAL_COMMIT_SHA }} - INITIAL_VERSION: ${{ env.INITIAL_VERSION }} + RELEASE_INITIAL_COMMIT_SHA: ${{ env.RELEASE_INITIAL_COMMIT_SHA }} + RELEASE_INITIAL_VERSION: ${{ env.RELEASE_INITIAL_VERSION }} - name: Add disclaimer to release notes preview run: | echo -e "_:warning: (this preview might not be accurate if the PR is not rebased on current master branch)_\n" > release_notes_preview.md diff --git a/PIPELINE.md b/PIPELINE.md deleted file mode 100644 index e6383446c..000000000 --- a/PIPELINE.md +++ /dev/null @@ -1,70 +0,0 @@ -# Pipeline - -## Environment Variables (env vars) - -This listing contains all environment variables used in `pipeline.py`. -Default evergreen-ci expansions can be looked up [here](https://docs.devprod.prod.corp.mongodb.com/evergreen/Project-Configuration/Project-Configuration-Files#expansions). - -| Environment Variable | Usage / Description | -|-------------------------------|------------------------------------------------------------------------------------| -| `otel_trace_id` | OpenTelemetry tracing: trace ID. Default evergreen-ci expansion. | -| `otel_parent_id` | OpenTelemetry tracing: parent span ID. Default evergreen-ci expansion. | -| `otel_collector_endpoint` | OpenTelemetry tracing: collector endpoint. Default evergreen-ci expansion. | -| `distro` | Image type (defaults to `ubi`) | -| `BASE_REPO_URL` | Base repository URL for images | -| `namespace` | Kubernetes namespace (defaults to `default`) | -| `skip_tags` | Tags to skip during build | -| `include_tags` | Tags to include during build | -| `all_agents` | Whether to build all agent images | -| `RUNNING_IN_EVG` | Whether running in Evergreen pipeline | -| `is_patch` | Whether running as a patch build. Default evergreen-ci expansion. | -| `pin_tag_at` | Time to pin image tag (format: `HH:MM`) | -| `created_at` | Build creation time (format: `%y_%m_%d_%H_%M_%S`). Default evergreen-ci expansion. | -| `triggered_by_git_tag` | Git tag that triggered the build. Default evergreen-ci expansion. Default evergreen-ci expansion. | -| `version_id` | Patch ID or version for non-release builds. Default evergreen-ci expansion. 
| -| `test_suffix` | Suffix for test images | -| `LOG_AUTOMATION_CONFIG_DIFF` | Whether to log automation config diff | -| `PYTHON_VERSION` | Python version for test images | -| `GOLANG_VERSION` | Go version for community images and tests | -| `QUAY_REGISTRY` | Quay registry URL (defaults to `quay.io/mongodb`) | -| `REGISTRY` | ECR registry URL (defaults to `268558157000.dkr.ecr.us-east-1.amazonaws.com/dev`) | -| `om_version` | Ops Manager version for OM image builds | -| `om_download_url` | Download URL for Ops Manager (optional, can be auto-detected) | - -## Context Image Build Process - -``` - ┌─────────────────────────────┐ - │ Release Pipeline │ - └────────────┬────────────────┘ - │ - ▼ - ┌─────────────────────────────────┐ - │ Build context image │ - │ Tag: opsmanager-context:1.33.0 │ - └────────────┬────────────────────┘ - │ - ▼ - ┌───────────────────────────────┐ - │ Daily Build │ - │ Base: opsmanager-context │ - │ Input tag: 1.33.0 │ - └────────────┬──────────────────┘ - │ - ▼ - ┌────────────────────────────────────┐ - │ Push Two Image Tags │ - └────────────┬───────────────┬───────┘ - ▼ ▼ - ┌────────────────────────┐ ┌──────────────────────────────┐ - │ Rolling Tag (latest) │ │ Immutable Tag (daily stamp) │ - │ opsmanager:1.33.0 │ │ opsmanager:1.33.0-2025-01-01 │ - └────────────────────────┘ └──────────────────────────────┘ - - ▼ (next day build) - ┌────────────────────────┐ ┌──────────────────────────────┐ - │ opsmanager:1.33.0 │ │ opsmanager:1.33.0-2025-01-02 │ - └────────────────────────┘ └──────────────────────────────┘ - ↑ now updated to point ↑ new image pushed - to the 2025-01-02 build -``` diff --git a/build_info.json b/build_info.json index 6bb5ddd50..70a5de772 100644 --- a/build_info.json +++ b/build_info.json @@ -1,11 +1,11 @@ { "images": { "operator": { - "dockerfile-path": "docker/mongodb-kubernetes-operator/Dockerfile.atomic", + "dockerfile-path": "docker/mongodb-kubernetes-operator/Dockerfile", "patch": { "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes"], "platforms": [ - "linux/amd64" + "linux/amd64" ] }, "staging": { @@ -20,6 +20,7 @@ }, "release": { "sign": true, + "olm-tag": true, "repositories": ["quay.io/mongodb/mongodb-kubernetes"], "platforms": [ "linux/arm64", @@ -30,7 +31,7 @@ } }, "operator-race": { - "dockerfile-path": "docker/mongodb-kubernetes-operator/Dockerfile.atomic", + "dockerfile-path": "docker/mongodb-kubernetes-operator/Dockerfile", "patch": { "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes"], "platforms": [ @@ -46,15 +47,16 @@ } }, "init-database": { - "dockerfile-path": "docker/mongodb-kubernetes-init-database/Dockerfile.atomic", + "dockerfile-path": "docker/mongodb-kubernetes-init-database/Dockerfile", "patch": { "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-database"], "platforms": [ - "linux/amd64" - ] + "linux/amd64" + ] }, "staging": { "sign": true, + "latest-tag": true, "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-database"], "platforms": [ "linux/arm64", @@ -65,6 +67,7 @@ }, "release": { "sign": true, + "olm-tag": true, "repositories": ["quay.io/mongodb/mongodb-kubernetes-init-database"], "platforms": [ "linux/arm64", @@ -75,7 +78,7 @@ } }, "init-appdb": { - "dockerfile-path": "docker/mongodb-kubernetes-init-appdb/Dockerfile.atomic", + "dockerfile-path": "docker/mongodb-kubernetes-init-appdb/Dockerfile", "patch": { "repositories": 
["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-appdb"], "platforms": [ @@ -84,6 +87,7 @@ }, "staging": { "sign": true, + "latest-tag": true, "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-appdb"], "platforms": [ "linux/arm64", @@ -94,6 +98,7 @@ }, "release": { "sign": true, + "olm-tag": true, "repositories": ["quay.io/mongodb/mongodb-kubernetes-init-appdb"], "platforms": [ "linux/arm64", @@ -104,7 +109,7 @@ } }, "init-ops-manager": { - "dockerfile-path": "docker/mongodb-kubernetes-init-ops-manager/Dockerfile.atomic", + "dockerfile-path": "docker/mongodb-kubernetes-init-ops-manager/Dockerfile", "patch": { "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-ops-manager"], "platforms": [ @@ -113,6 +118,7 @@ }, "staging": { "sign": true, + "latest-tag": true, "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-init-ops-manager"], "platforms": [ "linux/amd64" @@ -120,6 +126,7 @@ }, "release": { "sign": true, + "olm-tag": true, "repositories": ["quay.io/mongodb/mongodb-kubernetes-init-ops-manager"], "platforms": [ "linux/amd64" @@ -127,15 +134,16 @@ } }, "database": { - "dockerfile-path": "docker/mongodb-kubernetes-database/Dockerfile.atomic", + "dockerfile-path": "docker/mongodb-kubernetes-database/Dockerfile", "patch": { "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-database"], "platforms": [ - "linux/amd64" + "linux/amd64" ] }, "staging": { "sign": true, + "latest-tag": true, "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-database"], "platforms": [ "linux/arm64", @@ -146,6 +154,7 @@ }, "release": { "sign": true, + "olm-tag": true, "repositories": ["quay.io/mongodb/mongodb-kubernetes-database"], "platforms": [ "linux/arm64", @@ -187,7 +196,7 @@ } }, "readiness-probe": { - "dockerfile-path": "docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic", + "dockerfile-path": "docker/mongodb-kubernetes-readinessprobe/Dockerfile", "patch": { "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-readinessprobe"], "platforms": [ @@ -196,6 +205,7 @@ }, "staging": { "sign": true, + "latest-tag": true, "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-readinessprobe"], "platforms": [ "linux/arm64", @@ -205,6 +215,7 @@ "release": { "version": "1.0.22", "sign": true, + "olm-tag": true, "repositories": [ "quay.io/mongodb/mongodb-kubernetes-readinessprobe" ], @@ -215,7 +226,7 @@ } }, "upgrade-hook": { - "dockerfile-path": "docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic", + "dockerfile-path": "docker/mongodb-kubernetes-upgrade-hook/Dockerfile", "patch": { "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-operator-version-upgrade-post-start-hook"], "platforms": [ @@ -224,6 +235,7 @@ }, "staging": { "sign": true, + "latest-tag": true, "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-operator-version-upgrade-post-start-hook"], "platforms": [ "linux/arm64", @@ -233,6 +245,7 @@ "release": { "version": "1.0.9", "sign": true, + "olm-tag": true, "repositories": ["quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook"], "platforms": [ "linux/arm64", @@ -241,11 +254,11 @@ } }, "agent": { - "dockerfile-path": "docker/mongodb-agent/Dockerfile.atomic", + "dockerfile-path": "docker/mongodb-agent/Dockerfile", 
"patch": { "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent"], "platforms": [ - "linux/amd64" + "linux/amd64" ] }, "staging": { @@ -260,6 +273,7 @@ }, "manual_release": { "sign": true, + "olm-tag": true, "repositories": ["quay.io/mongodb/mongodb-agent-ubi", "quay.io/mongodb/mongodb-agent"], "platforms": [ "linux/arm64", @@ -270,7 +284,7 @@ } }, "ops-manager": { - "dockerfile-path": "docker/mongodb-enterprise-ops-manager/Dockerfile.atomic", + "dockerfile-path": "docker/mongodb-enterprise-ops-manager/Dockerfile", "patch": { "version": "om-version-from-release.json", "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-enterprise-ops-manager"], @@ -288,6 +302,7 @@ }, "manual_release": { "sign": true, + "olm-tag": true, "repositories": ["quay.io/mongodb/mongodb-enterprise-ops-manager"], "platforms": [ "linux/amd64" diff --git a/docker/mongodb-agent/Dockerfile.atomic b/docker/mongodb-agent/Dockerfile similarity index 100% rename from docker/mongodb-agent/Dockerfile.atomic rename to docker/mongodb-agent/Dockerfile diff --git a/docker/mongodb-agent/Dockerfile.builder b/docker/mongodb-agent/Dockerfile.builder deleted file mode 100644 index ac4dd31f0..000000000 --- a/docker/mongodb-agent/Dockerfile.builder +++ /dev/null @@ -1,15 +0,0 @@ -FROM scratch - -ARG agent_version -ARG agent_distro -ARG tools_distro -ARG tools_version - -ADD https://mciuploads.s3.amazonaws.com/mms-automation/mongodb-mms-build-agent/builds/automation-agent/prod/mongodb-mms-automation-agent-${agent_version}.${agent_distro}.tar.gz /data/mongodb-agent.tar.gz -ADD https://downloads.mongodb.org/tools/db/mongodb-database-tools-${tools_distro}-${tools_version}.tgz /data/mongodb-tools.tgz - -COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/LICENSE -COPY ./docker/mongodb-agent/agent-launcher-shim.sh /opt/scripts/agent-launcher-shim.sh -COPY ./docker/mongodb-agent/setup-agent-files.sh /opt/scripts/setup-agent-files.sh -COPY ./docker/mongodb-agent/dummy-probe.sh /opt/scripts/dummy-probe.sh -COPY ./docker/mongodb-agent/dummy-readinessprobe.sh /opt/scripts/dummy-readinessprobe.sh diff --git a/docker/mongodb-agent/Dockerfile.old b/docker/mongodb-agent/Dockerfile.old deleted file mode 100644 index 9022dd86f..000000000 --- a/docker/mongodb-agent/Dockerfile.old +++ /dev/null @@ -1,65 +0,0 @@ -ARG imagebase -FROM ${imagebase} as base - -FROM registry.access.redhat.com/ubi9/ubi-minimal - -ARG version - -LABEL name="MongoDB Agent" \ - version="${version}" \ - summary="MongoDB Agent" \ - description="MongoDB Agent" \ - vendor="MongoDB" \ - release="1" \ - maintainer="support@mongodb.com" - -# Replace libcurl-minimal and curl-minimal with the full versions -# https://bugzilla.redhat.com/show_bug.cgi?id=1994521 -RUN microdnf install -y libssh libpsl libbrotli \ - && microdnf download curl libcurl \ - && rpm -Uvh --nodeps --replacefiles "*curl*$( uname -i ).rpm" \ - && microdnf remove -y libcurl-minimal curl-minimal - -RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper -# Copy-pasted from https://www.mongodb.com/docs/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ -RUN microdnf install -y --disableplugin=subscription-manager \ - cyrus-sasl cyrus-sasl-gssapi cyrus-sasl-plain krb5-libs openldap openssl xz-libs -# Dependencies for the Agent -RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 \ - net-snmp \ - net-snmp-agent-libs -RUN microdnf install -y 
--disableplugin=subscription-manager \ - hostname tar gzip procps jq \ - && microdnf upgrade -y \ - && rm -rf /var/lib/apt/lists/* - -RUN mkdir -p /agent \ - && mkdir -p /var/lib/mongodb-mms-automation \ - && mkdir -p /var/log/mongodb-mms-automation/ \ - && chmod -R +wr /var/log/mongodb-mms-automation/ \ - # ensure that the agent user can write the logs in OpenShift - && touch /var/log/mongodb-mms-automation/readiness.log \ - && chmod ugo+rw /var/log/mongodb-mms-automation/readiness.log - -COPY --from=base /data/mongodb-agent.tar.gz /agent -COPY --from=base /data/mongodb-tools.tgz /agent -COPY --from=base /data/LICENSE /licenses/LICENSE - -# Copy scripts to a safe location that won't be overwritten by volume mount -COPY --from=base /opt/scripts/agent-launcher-shim.sh /usr/local/bin/agent-launcher-shim.sh -COPY --from=base /opt/scripts/setup-agent-files.sh /usr/local/bin/setup-agent-files.sh -COPY --from=base /opt/scripts/dummy-probe.sh /usr/local/bin/dummy-probe.sh -COPY --from=base /opt/scripts/dummy-readinessprobe /usr/local/bin/dummy-readinessprobe - -RUN tar xfz /agent/mongodb-agent.tar.gz \ - && mv mongodb-mms-automation-agent-*/mongodb-mms-automation-agent /agent/mongodb-agent \ - && chmod +x /agent/mongodb-agent \ - && mkdir -p /var/lib/automation/config \ - && chmod -R +r /var/lib/automation/config \ - && rm /agent/mongodb-agent.tar.gz \ - && rm -r mongodb-mms-automation-agent-* - -RUN tar xfz /agent/mongodb-tools.tgz --directory /var/lib/mongodb-mms-automation/ && rm /agent/mongodb-tools.tgz - -USER 2000 -CMD ["/agent/mongodb-agent", "-cluster=/var/lib/automation/config/automation-config.json"] diff --git a/docker/mongodb-enterprise-ops-manager/Dockerfile.atomic b/docker/mongodb-enterprise-ops-manager/Dockerfile similarity index 100% rename from docker/mongodb-enterprise-ops-manager/Dockerfile.atomic rename to docker/mongodb-enterprise-ops-manager/Dockerfile diff --git a/docker/mongodb-enterprise-ops-manager/Dockerfile.builder b/docker/mongodb-enterprise-ops-manager/Dockerfile.builder deleted file mode 100644 index 7ed2fea71..000000000 --- a/docker/mongodb-enterprise-ops-manager/Dockerfile.builder +++ /dev/null @@ -1,20 +0,0 @@ -# Build compilable stuff - -FROM public.ecr.aws/docker/library/golang:1.24 as readiness_builder -COPY . /go/src/github.com/mongodb/mongodb-kubernetes -WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes - -RUN CGO_ENABLED=0 go build -a -buildvcs=false -o /data/scripts/mmsconfiguration ./docker/mongodb-kubernetes-init-ops-manager/mmsconfiguration/edit_mms_configuration.go -RUN CGO_ENABLED=0 go build -a -buildvcs=false -o /data/scripts/backup-daemon-readiness-probe ./docker/mongodb-kubernetes-init-ops-manager/backupdaemon_readinessprobe/backupdaemon_readiness.go - -# Move binaries and scripts -FROM scratch - -COPY --from=readiness_builder /data/scripts/mmsconfiguration /data/scripts/mmsconfiguration -COPY --from=readiness_builder /data/scripts/backup-daemon-readiness-probe /data/scripts/backup-daemon-readiness-probe - -# After v2.0, when non-Static Agent images will be removed, please ensure to copy those files -# into ./docker/mongodb-enterprise-ops-manager directory. Leaving it this way will make the maintenance easier. 
-COPY ./docker/mongodb-kubernetes-init-ops-manager/scripts/docker-entry-point.sh /data/scripts -COPY ./docker/mongodb-kubernetes-init-ops-manager/scripts/backup-daemon-liveness-probe.sh /data/scripts -COPY ./docker/mongodb-kubernetes-init-ops-manager/LICENSE /data/licenses/mongodb-enterprise-ops-manager diff --git a/docker/mongodb-enterprise-ops-manager/Dockerfile.template b/docker/mongodb-enterprise-ops-manager/Dockerfile.template deleted file mode 100644 index 5e2b10d7d..000000000 --- a/docker/mongodb-enterprise-ops-manager/Dockerfile.template +++ /dev/null @@ -1,61 +0,0 @@ -ARG imagebase -FROM ${imagebase} as base - -FROM {{ base_image }} - -{% block labels %} -LABEL name="MongoDB Enterprise Ops Manager" \ - maintainer="support@mongodb.com" \ - vendor="MongoDB" \ - version="{{ version }}" \ - release="1" \ - summary="MongoDB Enterprise Ops Manager Image" \ - description="MongoDB Enterprise Ops Manager" -{% endblock %} - -ENV MMS_HOME /mongodb-ops-manager -ENV MMS_PROP_FILE ${MMS_HOME}/conf/conf-mms.properties -ENV MMS_CONF_FILE ${MMS_HOME}/conf/mms.conf -ENV MMS_LOG_DIR ${MMS_HOME}/logs -ENV MMS_TMP_DIR ${MMS_HOME}/tmp - -EXPOSE 8080 - -# OpsManager docker image needs to have the MongoDB dependencies because the -# backup daemon is running its database locally -{% block packages %} -{% endblock %} - -COPY --from=base /data/licenses /licenses/ - -COPY --from=base /data/scripts /opt/scripts - - -{% block static %} -RUN curl --fail -L -o ops_manager.tar.gz {{ om_download_url }} \ - && tar -xzf ops_manager.tar.gz \ - && rm ops_manager.tar.gz \ - && mv mongodb-mms* "${MMS_HOME}" -{% endblock %} - -# permissions -RUN chmod -R 0777 "${MMS_LOG_DIR}" \ - && chmod -R 0777 "${MMS_TMP_DIR}" \ - && chmod -R 0775 "${MMS_HOME}/conf" \ - && chmod -R 0775 "${MMS_HOME}/jdk" \ - && mkdir "${MMS_HOME}/mongodb-releases/" \ - && chmod -R 0775 "${MMS_HOME}/mongodb-releases" \ - && chmod -R 0777 "${MMS_CONF_FILE}" \ - && chmod -R 0777 "${MMS_PROP_FILE}" - -# The "${MMS_HOME}/conf" will be populated by the docker-entry-point.sh. -# For now we need to move into the templates directory. 
-RUN cp -r "${MMS_HOME}/conf" "${MMS_HOME}/conf-template" - -USER 2000 - -# operator to change the entrypoint to: /mongodb-ops-manager/bin/mongodb-mms start_mms (or a wrapper around this) -ENTRYPOINT [ "sleep infinity" ] - -{% block healthcheck %} -{% endblock %} diff --git a/docker/mongodb-enterprise-ops-manager/Dockerfile.ubi b/docker/mongodb-enterprise-ops-manager/Dockerfile.ubi deleted file mode 100644 index 00cb2f200..000000000 --- a/docker/mongodb-enterprise-ops-manager/Dockerfile.ubi +++ /dev/null @@ -1,30 +0,0 @@ -{% extends "Dockerfile.template" %} - -{% set base_image = "registry.access.redhat.com/ubi9/ubi-minimal" %} - -{% block packages %} - -# Replace libcurl-minimal and curl-minimal with the full versions -# https://bugzilla.redhat.com/show_bug.cgi?id=1994521 -RUN microdnf install -y libssh libpsl libbrotli \ - && microdnf download curl libcurl \ - && rpm -Uvh --nodeps --replacefiles "*curl*$( uname -i ).rpm" \ - && microdnf remove -y libcurl-minimal curl-minimal - -RUN microdnf install --disableplugin=subscription-manager -y \ - cyrus-sasl \ - cyrus-sasl-gssapi \ - cyrus-sasl-plain \ - krb5-libs \ - libpcap \ - lm_sensors-libs \ - net-snmp \ - net-snmp-agent-libs \ - openldap \ - openssl \ - tar \ - rpm-libs \ - net-tools \ - procps-ng \ - ncurses -{% endblock %} diff --git a/docker/mongodb-kubernetes-database/Dockerfile.atomic b/docker/mongodb-kubernetes-database/Dockerfile similarity index 100% rename from docker/mongodb-kubernetes-database/Dockerfile.atomic rename to docker/mongodb-kubernetes-database/Dockerfile diff --git a/docker/mongodb-kubernetes-database/Dockerfile.builder b/docker/mongodb-kubernetes-database/Dockerfile.builder deleted file mode 100644 index b0ad88835..000000000 --- a/docker/mongodb-kubernetes-database/Dockerfile.builder +++ /dev/null @@ -1,13 +0,0 @@ -# -## Database image -# -## Contents -# -# * licenses/mongodb-kubernetes-database - - -FROM scratch - - - -COPY LICENSE /data/licenses/mongodb-kubernetes-database diff --git a/docker/mongodb-kubernetes-database/Dockerfile.template b/docker/mongodb-kubernetes-database/Dockerfile.template deleted file mode 100644 index 4f8727eae..000000000 --- a/docker/mongodb-kubernetes-database/Dockerfile.template +++ /dev/null @@ -1,58 +0,0 @@ -ARG imagebase -FROM ${imagebase} as base - -FROM {{ base_image }} - -{% block labels %} - -LABEL name="MongoDB Kubernetes Database" \ - version="{{ version }}" \ - summary="MongoDB Kubernetes Database Image" \ - description="MongoDB Kubernetes Database Image" \ - vendor="MongoDB" \ - release="1" \ - maintainer="support@mongodb.com" - -{% endblock %} - - -{% block variables %} -ENV MMS_HOME /mongodb-automation -ENV MMS_LOG_DIR /var/log/mongodb-mms-automation -{% endblock %} - -{% block packages %} -{% endblock %} - -# Set the required perms -RUN mkdir -p "${MMS_LOG_DIR}" \ - && chmod 0775 "${MMS_LOG_DIR}" \ - && mkdir -p /var/lib/mongodb-mms-automation \ - && chmod 0775 /var/lib/mongodb-mms-automation \ - && mkdir -p /data \ - && chmod 0775 /data \ - && mkdir -p /journal \ - && chmod 0775 /journal \ - && mkdir -p "${MMS_HOME}" \ - && chmod -R 0775 "${MMS_HOME}" - -{% block dcar_copy_scripts %} -{% endblock %} - -# USER needs to be set for this image to pass RedHat verification. Some customers have these requirements as well -# It does not matter what number it is, as long as it is set to something. -# However, OpenShift will run the container as a random user, -# and the number in this configuration is not relevant. 
-USER 2000 - -{% block entrypoint %} -# The docker image doesn't have any scripts so by default does nothing -# The script will be copied in runtime from init containers and the operator is expected -# to override the COMMAND -ENTRYPOINT ["sleep infinity"] -{% endblock %} - -COPY --from=base /data/licenses/mongodb-kubernetes-database /licenses/mongodb-kubernetes-database - -{% block healthcheck %} -{% endblock %} diff --git a/docker/mongodb-kubernetes-database/Dockerfile.ubi b/docker/mongodb-kubernetes-database/Dockerfile.ubi deleted file mode 100644 index a94b625c9..000000000 --- a/docker/mongodb-kubernetes-database/Dockerfile.ubi +++ /dev/null @@ -1,41 +0,0 @@ -{% extends "Dockerfile.template" %} - -{% set base_image = "registry.access.redhat.com/ubi8/ubi-minimal" %} -{% set distro = "ubi" %} - -{% block packages %} -RUN microdnf update -y && rm -rf /var/cache/yum - -# these are the packages needed for the agent -RUN microdnf install -y --disableplugin=subscription-manager --setopt=install_weak_deps=0 nss_wrapper -RUN microdnf install -y --disableplugin=subscription-manager \ - hostname \ - procps - - -# these are the packages needed for MongoDB -# (https://docs.mongodb.com/manual/tutorial/install-mongodb-enterprise-on-red-hat-tarball/ "RHEL/CentOS 8" tab) -RUN microdnf install -y --disableplugin=subscription-manager \ - cyrus-sasl \ - cyrus-sasl-gssapi \ - cyrus-sasl-plain \ - krb5-libs \ - libcurl \ - lm_sensors-libs \ - net-snmp \ - net-snmp-agent-libs \ - openldap \ - openssl \ - jq \ - tar \ - xz-libs \ - findutils - - -{# -TODO: Find public mongodb documentation about this -# mongodb enterprise expects this library /usr/lib64/libsasl2.so.2 but -# cyrus-sasl creates it in /usr/lib64/libsasl2.so.3 instead -#} -RUN ln -s /usr/lib64/libsasl2.so.3 /usr/lib64/libsasl2.so.2 -{% endblock %} diff --git a/docker/mongodb-kubernetes-init-appdb/Dockerfile.atomic b/docker/mongodb-kubernetes-init-appdb/Dockerfile similarity index 100% rename from docker/mongodb-kubernetes-init-appdb/Dockerfile.atomic rename to docker/mongodb-kubernetes-init-appdb/Dockerfile diff --git a/docker/mongodb-kubernetes-init-appdb/Dockerfile.builder b/docker/mongodb-kubernetes-init-appdb/Dockerfile.builder deleted file mode 100644 index 69dc6d6af..000000000 --- a/docker/mongodb-kubernetes-init-appdb/Dockerfile.builder +++ /dev/null @@ -1,22 +0,0 @@ -# Build compilable stuff - -FROM public.ecr.aws/docker/library/golang:1.24 as readiness_builder -COPY . 
/go/src/github.com/mongodb/mongodb-kubernetes -WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes -RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go -RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go - -FROM scratch -ARG mongodb_tools_url_ubi - -COPY --from=readiness_builder /readinessprobe /data/ -COPY --from=readiness_builder /version-upgrade-hook /data/version-upgrade-hook - -ADD ${mongodb_tools_url_ubi} /data/mongodb_tools_ubi.tgz - -COPY ./docker/mongodb-kubernetes-init-database/content/probe.sh /data/probe.sh - -COPY ./docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh /data/scripts/ -COPY ./docker/mongodb-kubernetes-init-database/content/agent-launcher.sh /data/scripts/ - -COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/licenses/ diff --git a/docker/mongodb-kubernetes-init-appdb/Dockerfile.template b/docker/mongodb-kubernetes-init-appdb/Dockerfile.template deleted file mode 100644 index 3c0d45ee4..000000000 --- a/docker/mongodb-kubernetes-init-appdb/Dockerfile.template +++ /dev/null @@ -1,42 +0,0 @@ -ARG imagebase -FROM ${imagebase} as base - -FROM {{ base_image }} - -ARG version - -{%- if is_appdb %} -LABEL name="MongoDB Kubernetes Init AppDB" \ - version="mongodb-kubernetes-init-appdb-${version}" \ - summary="MongoDB Kubernetes AppDB Init Image" \ - description="Startup Scripts for MongoDB Enterprise Application Database for Ops Manager" \ -{%- else %} -LABEL name="MongoDB Kubernetes Init Database" \ - version="mongodb-kubernetes-init-database-${version}" \ - summary="MongoDB Kubernetes Database Init Image" \ - description="Startup Scripts for MongoDB Enterprise Database" \ -{%- endif %} - release="1" \ - vendor="MongoDB" \ - maintainer="support@mongodb.com" - -COPY --from=base /data/readinessprobe /probes/readinessprobe -COPY --from=base /data/probe.sh /probes/probe.sh -COPY --from=base /data/scripts/ /scripts/ -COPY --from=base /data/licenses /licenses/ - -{%- if is_appdb %} -COPY --from=base /data/version-upgrade-hook /probes/version-upgrade-hook -{%- endif %} - -{% block mongodb_tools %} -{% endblock %} - -RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ - && rm /tools/mongodb_tools.tgz - -USER 2000 -ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] - -{% block healthcheck %} -{% endblock %} diff --git a/docker/mongodb-kubernetes-init-appdb/Dockerfile.ubi_minimal b/docker/mongodb-kubernetes-init-appdb/Dockerfile.ubi_minimal deleted file mode 100644 index b5400b147..000000000 --- a/docker/mongodb-kubernetes-init-appdb/Dockerfile.ubi_minimal +++ /dev/null @@ -1,11 +0,0 @@ -{% extends "Dockerfile.template" %} - -{% set base_image = "registry.access.redhat.com/ubi8/ubi-minimal" %} - -{% block mongodb_tools %} -RUN microdnf -y update --nodocs \ - && microdnf -y install --nodocs tar gzip \ - && microdnf clean all - -COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz -{% endblock %} diff --git a/docker/mongodb-kubernetes-init-database/Dockerfile.atomic b/docker/mongodb-kubernetes-init-database/Dockerfile similarity index 100% rename from docker/mongodb-kubernetes-init-database/Dockerfile.atomic rename to docker/mongodb-kubernetes-init-database/Dockerfile diff --git a/docker/mongodb-kubernetes-init-database/Dockerfile.builder 
b/docker/mongodb-kubernetes-init-database/Dockerfile.builder deleted file mode 100644 index 69dc6d6af..000000000 --- a/docker/mongodb-kubernetes-init-database/Dockerfile.builder +++ /dev/null @@ -1,22 +0,0 @@ -# Build compilable stuff - -FROM public.ecr.aws/docker/library/golang:1.24 as readiness_builder -COPY . /go/src/github.com/mongodb/mongodb-kubernetes -WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes -RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /readinessprobe ./mongodb-community-operator/cmd/readiness/main.go -RUN CGO_ENABLED=0 GOFLAGS=-buildvcs=false go build -o /version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go - -FROM scratch -ARG mongodb_tools_url_ubi - -COPY --from=readiness_builder /readinessprobe /data/ -COPY --from=readiness_builder /version-upgrade-hook /data/version-upgrade-hook - -ADD ${mongodb_tools_url_ubi} /data/mongodb_tools_ubi.tgz - -COPY ./docker/mongodb-kubernetes-init-database/content/probe.sh /data/probe.sh - -COPY ./docker/mongodb-kubernetes-init-database/content/agent-launcher-lib.sh /data/scripts/ -COPY ./docker/mongodb-kubernetes-init-database/content/agent-launcher.sh /data/scripts/ - -COPY ./docker/mongodb-kubernetes-init-database/content/LICENSE /data/licenses/ diff --git a/docker/mongodb-kubernetes-init-database/Dockerfile.template b/docker/mongodb-kubernetes-init-database/Dockerfile.template deleted file mode 100644 index 3c0d45ee4..000000000 --- a/docker/mongodb-kubernetes-init-database/Dockerfile.template +++ /dev/null @@ -1,42 +0,0 @@ -ARG imagebase -FROM ${imagebase} as base - -FROM {{ base_image }} - -ARG version - -{%- if is_appdb %} -LABEL name="MongoDB Kubernetes Init AppDB" \ - version="mongodb-kubernetes-init-appdb-${version}" \ - summary="MongoDB Kubernetes AppDB Init Image" \ - description="Startup Scripts for MongoDB Enterprise Application Database for Ops Manager" \ -{%- else %} -LABEL name="MongoDB Kubernetes Init Database" \ - version="mongodb-kubernetes-init-database-${version}" \ - summary="MongoDB Kubernetes Database Init Image" \ - description="Startup Scripts for MongoDB Enterprise Database" \ -{%- endif %} - release="1" \ - vendor="MongoDB" \ - maintainer="support@mongodb.com" - -COPY --from=base /data/readinessprobe /probes/readinessprobe -COPY --from=base /data/probe.sh /probes/probe.sh -COPY --from=base /data/scripts/ /scripts/ -COPY --from=base /data/licenses /licenses/ - -{%- if is_appdb %} -COPY --from=base /data/version-upgrade-hook /probes/version-upgrade-hook -{%- endif %} - -{% block mongodb_tools %} -{% endblock %} - -RUN tar xfz /tools/mongodb_tools.tgz --directory /tools \ - && rm /tools/mongodb_tools.tgz - -USER 2000 -ENTRYPOINT [ "/bin/cp", "-f", "-r", "/scripts/agent-launcher.sh", "/scripts/agent-launcher-lib.sh", "/probes/readinessprobe", "/probes/probe.sh", "/tools", "/opt/scripts/" ] - -{% block healthcheck %} -{% endblock %} diff --git a/docker/mongodb-kubernetes-init-database/Dockerfile.ubi_minimal b/docker/mongodb-kubernetes-init-database/Dockerfile.ubi_minimal deleted file mode 100644 index b5400b147..000000000 --- a/docker/mongodb-kubernetes-init-database/Dockerfile.ubi_minimal +++ /dev/null @@ -1,11 +0,0 @@ -{% extends "Dockerfile.template" %} - -{% set base_image = "registry.access.redhat.com/ubi8/ubi-minimal" %} - -{% block mongodb_tools %} -RUN microdnf -y update --nodocs \ - && microdnf -y install --nodocs tar gzip \ - && microdnf clean all - -COPY --from=base /data/mongodb_tools_ubi.tgz /tools/mongodb_tools.tgz -{% endblock %} diff --git 
a/docker/mongodb-kubernetes-init-ops-manager/Dockerfile.atomic b/docker/mongodb-kubernetes-init-ops-manager/Dockerfile similarity index 100% rename from docker/mongodb-kubernetes-init-ops-manager/Dockerfile.atomic rename to docker/mongodb-kubernetes-init-ops-manager/Dockerfile diff --git a/docker/mongodb-kubernetes-init-ops-manager/Dockerfile.builder b/docker/mongodb-kubernetes-init-ops-manager/Dockerfile.builder deleted file mode 100644 index 62fa29cd7..000000000 --- a/docker/mongodb-kubernetes-init-ops-manager/Dockerfile.builder +++ /dev/null @@ -1,14 +0,0 @@ -# -# Dockerfile for Init Ops Manager Context. -# - -FROM public.ecr.aws/docker/library/golang:1.24 as builder -WORKDIR /go/src -ADD . . -RUN CGO_ENABLED=0 go build -a -buildvcs=false -o /data/scripts/mmsconfiguration ./mmsconfiguration -RUN CGO_ENABLED=0 go build -a -buildvcs=false -o /data/scripts/backup-daemon-readiness-probe ./backupdaemon_readinessprobe/ - -COPY scripts/docker-entry-point.sh /data/scripts/ -COPY scripts/backup-daemon-liveness-probe.sh /data/scripts/ - -COPY LICENSE /data/licenses/mongodb-enterprise-ops-manager diff --git a/docker/mongodb-kubernetes-init-ops-manager/Dockerfile.template b/docker/mongodb-kubernetes-init-ops-manager/Dockerfile.template deleted file mode 100644 index ca53b644b..000000000 --- a/docker/mongodb-kubernetes-init-ops-manager/Dockerfile.template +++ /dev/null @@ -1,25 +0,0 @@ -ARG imagebase -FROM ${imagebase} as base - -FROM {{ base_image }} - -LABEL name="MongoDB Kubernetes Ops Manager Init" \ - maintainer="support@mongodb.com" \ - vendor="MongoDB" \ - version="mongodb-kubernetes-init-ops-manager-{{version}}" \ - release="1" \ - summary="MongoDB Kubernetes Ops Manager Init Image" \ - description="Startup Scripts for MongoDB Enterprise Ops Manager" - - -COPY --from=base /data/scripts /scripts -COPY --from=base /data/licenses /licenses - -{% block packages %} -{% endblock %} - -USER 2000 -ENTRYPOINT [ "/bin/cp", "-f", "/scripts/docker-entry-point.sh", "/scripts/backup-daemon-liveness-probe.sh", "/scripts/mmsconfiguration", "/scripts/backup-daemon-readiness-probe", "/opt/scripts/" ] - -{% block healthcheck %} -{% endblock %} diff --git a/docker/mongodb-kubernetes-init-ops-manager/Dockerfile.ubi_minimal b/docker/mongodb-kubernetes-init-ops-manager/Dockerfile.ubi_minimal deleted file mode 100644 index 2f16d3d9a..000000000 --- a/docker/mongodb-kubernetes-init-ops-manager/Dockerfile.ubi_minimal +++ /dev/null @@ -1,8 +0,0 @@ -{% extends "Dockerfile.template" %} - -{% set base_image = "registry.access.redhat.com/ubi9/ubi-minimal" %} - -{% block packages %} -RUN microdnf -y update --nodocs \ - && microdnf clean all -{% endblock %} diff --git a/docker/mongodb-kubernetes-operator/Dockerfile.atomic b/docker/mongodb-kubernetes-operator/Dockerfile similarity index 100% rename from docker/mongodb-kubernetes-operator/Dockerfile.atomic rename to docker/mongodb-kubernetes-operator/Dockerfile diff --git a/docker/mongodb-kubernetes-operator/Dockerfile.builder b/docker/mongodb-kubernetes-operator/Dockerfile.builder deleted file mode 100644 index 5663cf16e..000000000 --- a/docker/mongodb-kubernetes-operator/Dockerfile.builder +++ /dev/null @@ -1,53 +0,0 @@ -# -# Dockerfile for Operator. -# to be called from git root -# docker build . 
-f docker/mongodb-kubernetes-operator/Dockerfile.builder -# - -FROM public.ecr.aws/docker/library/golang:1.24 as builder - -ARG release_version -ARG log_automation_config_diff -ARG use_race -ARG TARGETOS -ARG TARGETARCH - -COPY go.sum go.mod /go/src/github.com/mongodb/mongodb-kubernetes/ - -WORKDIR /go/src/github.com/mongodb/mongodb-kubernetes -RUN go mod download - -COPY . /go/src/github.com/mongodb/mongodb-kubernetes - -RUN go version -RUN git version -RUN mkdir /build && \ - if [ $use_race = "true" ]; then \ - echo "Building with race detector" && \ - CGO_ENABLED=1 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /build/mongodb-kubernetes-operator \ - -buildvcs=false \ - -race \ - -ldflags=" -X github.com/mongodb/mongodb-kubernetes/pkg/util.OperatorVersion=${release_version} \ - -X github.com/mongodb/mongodb-kubernetes/pkg/util.LogAutomationConfigDiff=${log_automation_config_diff}"; \ - else \ - echo "Building without race detector" && \ - CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -o /build/mongodb-kubernetes-operator \ - -buildvcs=false \ - -ldflags="-s -w -X github.com/mongodb/mongodb-kubernetes/pkg/util.OperatorVersion=${release_version} \ - -X github.com/mongodb/mongodb-kubernetes/pkg/util.LogAutomationConfigDiff=${log_automation_config_diff}"; \ - fi - - -ADD https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 /usr/local/bin/jq -RUN chmod +x /usr/local/bin/jq - -RUN mkdir -p /data -RUN cat release.json | jq -r '.supportedImages."mongodb-agent" | { "supportedImages": { "mongodb-agent": . } }' > /data/om_version_mapping.json -RUN chmod +r /data/om_version_mapping.json - -FROM scratch - -COPY --from=builder /build/mongodb-kubernetes-operator /data/ -COPY --from=builder /data/om_version_mapping.json /data/om_version_mapping.json - -ADD docker/mongodb-kubernetes-operator/licenses /data/licenses/ diff --git a/docker/mongodb-kubernetes-operator/Dockerfile.template b/docker/mongodb-kubernetes-operator/Dockerfile.template deleted file mode 100644 index a06e4bd63..000000000 --- a/docker/mongodb-kubernetes-operator/Dockerfile.template +++ /dev/null @@ -1,36 +0,0 @@ -# -# Base Template Dockerfile for Operator Image. 
-# - -ARG imagebase -FROM ${imagebase} as base - -FROM {{ base_image }} - -{% block labels %} -LABEL name="MongoDB Kubernetes Operator" \ - maintainer="support@mongodb.com" \ - vendor="MongoDB" \ - version="{{ version }}" \ - release="1" \ - summary="MongoDB Kubernetes Operator Image" \ - description="MongoDB Kubernetes Operator Image" -{% endblock %} - -{% block packages -%} -{% endblock -%} - -{% block static %} -{% endblock %} - - -COPY --from=base /data/mongodb-kubernetes-operator /usr/local/bin/mongodb-kubernetes-operator -COPY --from=base /data/om_version_mapping.json /usr/local/om_version_mapping.json -COPY --from=base /data/licenses /licenses/ - -USER 2000 - -ENTRYPOINT exec /usr/local/bin/mongodb-kubernetes-operator - -{% block healthcheck %} -{% endblock %} diff --git a/docker/mongodb-kubernetes-operator/Dockerfile.ubi b/docker/mongodb-kubernetes-operator/Dockerfile.ubi deleted file mode 100644 index 04e83faf7..000000000 --- a/docker/mongodb-kubernetes-operator/Dockerfile.ubi +++ /dev/null @@ -1,12 +0,0 @@ -{% extends "Dockerfile.template" %} - -{% set base_image = "registry.access.redhat.com/ubi9/ubi-minimal" %} - -{% block packages -%} -# Building an UBI-based image: https://red.ht/3n6b9y0 -RUN microdnf update \ - --disableplugin=subscription-manager \ - --disablerepo=* --enablerepo=ubi-9-appstream-rpms --enablerepo=ubi-9-baseos-rpms -y \ - && rm -rf /var/cache/yum -RUN microdnf install -y glibc-langpack-en -{% endblock -%} diff --git a/docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic b/docker/mongodb-kubernetes-readinessprobe/Dockerfile similarity index 100% rename from docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic rename to docker/mongodb-kubernetes-readinessprobe/Dockerfile diff --git a/docker/mongodb-kubernetes-readinessprobe/Dockerfile.builder b/docker/mongodb-kubernetes-readinessprobe/Dockerfile.builder deleted file mode 100644 index e538e793e..000000000 --- a/docker/mongodb-kubernetes-readinessprobe/Dockerfile.builder +++ /dev/null @@ -1,11 +0,0 @@ -FROM public.ecr.aws/docker/library/golang:1.24 as builder -WORKDIR /go/src -ADD . . - -ARG TARGETOS -ARG TARGETARCH -RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -a -o /data/scripts/readinessprobe ./mongodb-community-operator/cmd/readiness/main.go - -FROM scratch as final - -COPY --from=builder /data/scripts/readinessprobe /probes/ diff --git a/docker/mongodb-kubernetes-readinessprobe/Dockerfile.old b/docker/mongodb-kubernetes-readinessprobe/Dockerfile.old deleted file mode 100644 index 17c590526..000000000 --- a/docker/mongodb-kubernetes-readinessprobe/Dockerfile.old +++ /dev/null @@ -1,6 +0,0 @@ -ARG imagebase -FROM ${imagebase} as base - -FROM registry.access.redhat.com/ubi9/ubi-minimal - -COPY --from=base /probes/readinessprobe /probes/readinessprobe diff --git a/docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic b/docker/mongodb-kubernetes-upgrade-hook/Dockerfile similarity index 100% rename from docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic rename to docker/mongodb-kubernetes-upgrade-hook/Dockerfile diff --git a/docker/mongodb-kubernetes-upgrade-hook/Dockerfile.builder b/docker/mongodb-kubernetes-upgrade-hook/Dockerfile.builder deleted file mode 100644 index 1aeffa93b..000000000 --- a/docker/mongodb-kubernetes-upgrade-hook/Dockerfile.builder +++ /dev/null @@ -1,11 +0,0 @@ -FROM public.ecr.aws/docker/library/golang:1.24 as builder -WORKDIR /go/src -ADD . . 
- -ARG TARGETOS -ARG TARGETARCH -RUN CGO_ENABLED=0 GOOS=${TARGETOS} GOARCH=${TARGETARCH} go build -a -o /data/scripts/version-upgrade-hook ./mongodb-community-operator/cmd/versionhook/main.go - -FROM scratch as final - -COPY --from=builder /data/scripts/version-upgrade-hook / diff --git a/docker/mongodb-kubernetes-upgrade-hook/Dockerfile.old b/docker/mongodb-kubernetes-upgrade-hook/Dockerfile.old deleted file mode 100644 index 362831582..000000000 --- a/docker/mongodb-kubernetes-upgrade-hook/Dockerfile.old +++ /dev/null @@ -1,6 +0,0 @@ -ARG imagebase -FROM ${imagebase} as base - -FROM registry.access.redhat.com/ubi9/ubi-minimal - -COPY --from=base /version-upgrade-hook /version-upgrade-hook diff --git a/inventories/agent.yaml b/inventories/agent.yaml deleted file mode 100644 index 6601c7363..000000000 --- a/inventories/agent.yaml +++ /dev/null @@ -1,64 +0,0 @@ -vars: - quay_registry: quay.io/mongodb/mongodb-agent - s3_bucket: s3://enterprise-operator-dockerfiles/dockerfiles/mongodb-agent - -images: - - name: mongodb-agent - vars: - context: . - template_context: docker/mongodb-agent - - platform: linux/$(inputs.params.architecture) - stages: - - name: mongodb-agent-context - task_type: docker_build - dockerfile: docker/mongodb-agent/Dockerfile.builder - tags: [ "ubi" ] - buildargs: - agent_version: $(inputs.params.version) - tools_version: $(inputs.params.tools_version) - agent_distro: $(inputs.params.agent_distro) - tools_distro: $(inputs.params.tools_distro) - - labels: - quay.expires-after: 48h - - output: - - registry: $(inputs.params.registry)/mongodb-agent-ubi - tag: $(inputs.params.version)-context-$(inputs.params.architecture) - - - name: mongodb-agent-build-context-release - task_type: docker_build - tags: ["release"] - dockerfile: docker/mongodb-agent/Dockerfile.builder - buildargs: - agent_version: $(inputs.params.version) - tools_version: $(inputs.params.tools_version) - agent_distro: $(inputs.params.agent_distro) - tools_distro: $(inputs.params.tools_distro) - output: - - registry: $(inputs.params.quay_registry) - tag: $(inputs.params.version)-context-$(inputs.params.architecture) - - - name: mongodb-agent-build - task_type: docker_build - tags: [ "ubi" ] - buildargs: - imagebase: $(inputs.params.registry)/mongodb-agent-ubi:$(inputs.params.version)-context-$(inputs.params.architecture) - version: $(inputs.params.version) - dockerfile: docker/mongodb-agent/Dockerfile.old - - labels: - quay.expires-after: 48h - - output: - - registry: $(inputs.params.registry)/mongodb-agent-ubi - tag: $(inputs.params.version)-$(inputs.params.architecture) - - registry: $(inputs.params.registry)/mongodb-agent-ubi - tag: latest-$(inputs.params.architecture) - - - name: mongodb-agent-template-ubi - task_type: dockerfile_template - tags: ["release"] - output: - - dockerfile: $(inputs.params.s3_bucket)/$(inputs.params.version)/ubi/Dockerfile diff --git a/inventories/daily.yaml b/inventories/daily.yaml deleted file mode 100644 index dd4551476..000000000 --- a/inventories/daily.yaml +++ /dev/null @@ -1,41 +0,0 @@ -vars: - # these variables are configured from the outside, in pipeline.py::image_config - quay_registry: quay.io/mongodb/ - s3_bucket_http: https://enterprise-operator-dockerfiles.s3.amazonaws.com/dockerfiles/ - ecr_registry_ubi: 268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/ - # ubi suffix is "-ubi" by default, but it's empty for mongodb-kubernetes-operator, readiness and versionhook images - ubi_suffix: "-ubi" - base_suffix: "" - architecture_suffix: "" - platform: "amd64" - -images: - - 
name: image-daily-build - vars: - context: . - platform: linux/$(inputs.params.platform) - stages: - - name: build-ubi - task_type: docker_build - tags: ["ubi"] - inputs: - - build_id - dockerfile: $(inputs.params.s3_bucket_http)/$(inputs.params.release_version)/ubi/Dockerfile - buildargs: - imagebase: $(inputs.params.quay_registry)$(inputs.params.base_suffix):$(inputs.params.release_version)-context$(inputs.params.architecture_suffix) - # This is required for correctly labeling the agent image and is not used - # in the other images. - version: $(inputs.params.release_version) - output: - - registry: $(inputs.params.quay_registry)$(inputs.params.ubi_suffix) - tag: $(inputs.params.release_version)$(inputs.params.architecture_suffix) - - registry: $(inputs.params.quay_registry)$(inputs.params.ubi_suffix) - tag: $(inputs.params.release_version)-b$(inputs.params.build_id)$(inputs.params.architecture_suffix) - # Below two coordinates are on par with the e2e_om_ops_manager_upgrade test but - # don't seem to reflect the way we push things to Quay. - # The proper fix should be addressed in https://jira.mongodb.org/browse/CLOUDP-133709 - - registry: $(inputs.params.ecr_registry_ubi)$(inputs.params.ubi_suffix) - tag: $(inputs.params.release_version)$(inputs.params.architecture_suffix) - - registry: $(inputs.params.ecr_registry_ubi)$(inputs.params.ubi_suffix) - tag: $(inputs.params.release_version)-b$(inputs.params.build_id)$(inputs.params.architecture_suffix) - diff --git a/inventories/database.yaml b/inventories/database.yaml deleted file mode 100644 index 05d123f31..000000000 --- a/inventories/database.yaml +++ /dev/null @@ -1,65 +0,0 @@ -vars: - quay_registry: quay.io/mongodb/mongodb-kubernetes-database - s3_bucket: s3://enterprise-operator-dockerfiles/dockerfiles/mongodb-kubernetes-database - -images: -- name: database - vars: - context: docker/mongodb-kubernetes-database - platform: linux/amd64 - - stages: - - name: database-build-context - task_type: docker_build - dockerfile: Dockerfile.builder - output: - - registry: $(inputs.params.registry)/mongodb-kubernetes-database-context - tag: $(inputs.params.version_id) - - - name: init-appdb-template-ubi - task_type: dockerfile_template - distro: ubi - tags: ["ubi"] - inputs: - - version - output: - - dockerfile: $(functions.tempfile) - - - name: database-build-ubi - task_type: docker_build - dockerfile: $(stages['init-appdb-template-ubi'].outputs[0].dockerfile) - tags: ["ubi"] - buildargs: - imagebase: $(inputs.params.registry)/mongodb-kubernetes-database-context:$(inputs.params.version_id) - output: - - registry: $(inputs.params.registry)/mongodb-kubernetes-database - tag: $(inputs.params.version_id) - - - name: master-latest - task_type: tag_image - tags: ["master"] - source: - registry: $(inputs.params.registry)/mongodb-kubernetes-database - tag: $(inputs.params.version_id) - destination: - - registry: $(inputs.params.registry)/mongodb-kubernetes-database - tag: latest - - - name: database-release-context - task_type: tag_image - tags: ["release"] - source: - registry: $(inputs.params.registry)/mongodb-kubernetes-database-context - tag: $(inputs.params.version_id) - destination: - - registry: $(inputs.params.quay_registry) - tag: $(inputs.params.version)-context - - - name: database-template-ubi - task_type: dockerfile_template - distro: ubi - tags: ["release"] - inputs: - - version - output: - - dockerfile: $(inputs.params.s3_bucket)/$(inputs.params.version)/ubi/Dockerfile diff --git a/inventories/init_appdb.yaml 
b/inventories/init_appdb.yaml deleted file mode 100644 index 50d5d4199..000000000 --- a/inventories/init_appdb.yaml +++ /dev/null @@ -1,69 +0,0 @@ -vars: - quay_registry: quay.io/mongodb/mongodb-kubernetes-init-appdb - s3_bucket: s3://enterprise-operator-dockerfiles/dockerfiles/mongodb-kubernetes-init-appdb - -images: -- name: init-appdb - vars: - context: . - template_context: docker/mongodb-kubernetes-init-database - platform: linux/amd64 - - stages: - - name: init-appdb-build-context - task_type: docker_build - dockerfile: docker/mongodb-kubernetes-init-database/Dockerfile.builder - buildargs: - mongodb_tools_url_ubi: $(inputs.params.mongodb_tools_url_ubi) - output: - - registry: $(inputs.params.registry)/mongodb-kubernetes-init-appdb-context - tag: $(inputs.params.version_id) - - - name: init-appdb-template-ubi - task_type: dockerfile_template - template_file_extension: ubi_minimal - tags: ["ubi"] - inputs: - - is_appdb - output: - - dockerfile: $(functions.tempfile) - - - name: init-appdb-build-ubi - task_type: docker_build - tags: ["ubi"] - buildargs: - version: $(inputs.params.version) - imagebase: $(inputs.params.registry)/mongodb-kubernetes-init-appdb-context:$(inputs.params.version_id) - dockerfile: $(stages['init-appdb-template-ubi'].outputs[0].dockerfile) - output: - - registry: $(inputs.params.registry)/mongodb-kubernetes-init-appdb - tag: $(inputs.params.version_id) - - - name: master-latest - task_type: tag_image - tags: [ "master" ] - source: - registry: $(inputs.params.registry)/mongodb-kubernetes-init-appdb - tag: $(inputs.params.version_id) - destination: - - registry: $(inputs.params.registry)/mongodb-kubernetes-init-appdb - tag: latest - - - name: init-appdb-release-context - task_type: tag_image - tags: ["release"] - source: - registry: $(inputs.params.registry)/mongodb-kubernetes-init-appdb-context - tag: $(inputs.params.version_id) - destination: - - registry: $(inputs.params.quay_registry) - tag: $(inputs.params.version)-context - - - name: init-appdb-template-ubi - task_type: dockerfile_template - template_file_extension: ubi_minimal - tags: ["release"] - inputs: - - is_appdb - output: - - dockerfile: $(inputs.params.s3_bucket)/$(inputs.params.version)/ubi/Dockerfile diff --git a/inventories/init_database.yaml b/inventories/init_database.yaml deleted file mode 100644 index 57ab81679..000000000 --- a/inventories/init_database.yaml +++ /dev/null @@ -1,75 +0,0 @@ -vars: - quay_registry: quay.io/mongodb/mongodb-kubernetes-init-database - s3_bucket: s3://enterprise-operator-dockerfiles/dockerfiles/mongodb-kubernetes-init-database - -images: -- name: init-database - vars: - context: . 
- template_context: docker/mongodb-kubernetes-init-database - platform: linux/amd64 - - stages: - - name: init-database-build-context - task_type: docker_build - dockerfile: docker/mongodb-kubernetes-init-database/Dockerfile.builder - buildargs: - mongodb_tools_url_ubi: $(inputs.params.mongodb_tools_url_ubi) - output: - - registry: $(inputs.params.registry)/mongodb-kubernetes-init-database-context - tag: $(inputs.params.version_id) - - registry: $(inputs.params.registry)/mongodb-kubernetes-init-database-context - tag: $(inputs.params.version) - - - name: init-database-template-ubi - task_type: dockerfile_template - template_file_extension: ubi_minimal - tags: ["ubi"] - inputs: - - is_appdb - output: - - dockerfile: $(functions.tempfile) - - - name: init-database-build-ubi - task_type: docker_build - tags: ["ubi"] - buildargs: - imagebase: $(inputs.params.registry)/mongodb-kubernetes-init-database-context:$(inputs.params.version_id) - version: $(inputs.params.version) - dockerfile: $(stages['init-database-template-ubi'].outputs[0].dockerfile) - inputs: - - is_appdb - output: - - registry: $(inputs.params.registry)/mongodb-kubernetes-init-database - tag: $(inputs.params.version_id) - - registry: $(inputs.params.registry)/mongodb-kubernetes-init-database - tag: $(inputs.params.version) - - - name: master-latest - task_type: tag_image - tags: ["master"] - source: - registry: $(inputs.params.registry)/mongodb-kubernetes-init-database - tag: $(inputs.params.version_id) - destination: - - registry: $(inputs.params.registry)/mongodb-kubernetes-init-database - tag: latest - - - name: init-database-release-context - task_type: tag_image - tags: ["release"] - source: - registry: $(inputs.params.registry)/mongodb-kubernetes-init-database-context - tag: $(inputs.params.version_id) - destination: - - registry: $(inputs.params.quay_registry) - tag: $(inputs.params.version)-context - - - name: init-database-template-ubi - task_type: dockerfile_template - template_file_extension: ubi_minimal - tags: ["release"] - inputs: - - is_appdb - output: - - dockerfile: $(inputs.params.s3_bucket)/$(inputs.params.version)/ubi/Dockerfile diff --git a/inventories/init_om.yaml b/inventories/init_om.yaml deleted file mode 100644 index f3d310470..000000000 --- a/inventories/init_om.yaml +++ /dev/null @@ -1,65 +0,0 @@ -vars: - quay_registry: quay.io/mongodb/mongodb-kubernetes-init-ops-manager - s3_bucket: s3://enterprise-operator-dockerfiles/dockerfiles/mongodb-kubernetes-init-ops-manager - -images: -- name: init-ops-manager - vars: - context: docker/mongodb-kubernetes-init-ops-manager - platform: linux/amd64 - - stages: - - name: init-ops-manager-build-context - task_type: docker_build - dockerfile: Dockerfile.builder - output: - - registry: $(inputs.params.registry)/mongodb-kubernetes-init-ops-manager-context - tag: $(inputs.params.version_id) - - - name: init-ops-manager-template-ubi - task_type: dockerfile_template - template_file_extension: ubi_minimal - tags: ["ubi"] - inputs: - - version - output: - - dockerfile: $(functions.tempfile) - - - name: init-ops-manager-build-ubi - task_type: docker_build - dockerfile: $(stages['init-ops-manager-template-ubi'].outputs[0].dockerfile) - tags: ["ubi"] - buildargs: - imagebase: $(inputs.params.registry)/mongodb-kubernetes-init-ops-manager-context:$(inputs.params.version_id) - output: - - registry: $(inputs.params.registry)/mongodb-kubernetes-init-ops-manager - tag: $(inputs.params.version_id) - - - name: master-latest - task_type: tag_image - tags: ["master"] - source: - 
registry: $(inputs.params.registry)/mongodb-kubernetes-init-ops-manager - tag: $(inputs.params.version_id) - destination: - - registry: $(inputs.params.registry)/mongodb-kubernetes-init-ops-manager - tag: latest - - - name: init-ops-manager-release-context - task_type: tag_image - tags: ["release"] - source: - registry: $(inputs.params.registry)/mongodb-kubernetes-init-ops-manager-context - tag: $(inputs.params.version_id) - destination: - - registry: $(inputs.params.quay_registry) - tag: $(inputs.params.version)-context - - - name: init-ops-manager-template-ubi - task_type: dockerfile_template - template_file_extension: ubi_minimal - tags: ["release"] - inputs: - - version - output: - - dockerfile: $(inputs.params.s3_bucket)/$(inputs.params.version)/ubi/Dockerfile diff --git a/inventories/mco_test.yaml b/inventories/mco_test.yaml deleted file mode 100644 index c926b4069..000000000 --- a/inventories/mco_test.yaml +++ /dev/null @@ -1,17 +0,0 @@ -images: -- name: community-operator-e2e - vars: - context: . # we will need the whole root for the go tests - platform: linux/amd64 - - stages: - - name: build - task_type: docker_build - dockerfile: docker/mongodb-community-tests/Dockerfile - buildargs: - GOLANG_VERSION: $(inputs.params.golang_version) - output: - - registry: $(inputs.params.registry)/mongodb-community-tests - tag: latest - - registry: $(inputs.params.registry)/mongodb-community-tests - tag: $(inputs.params.version_id) diff --git a/inventories/om.yaml b/inventories/om.yaml deleted file mode 100644 index e4daf3103..000000000 --- a/inventories/om.yaml +++ /dev/null @@ -1,62 +0,0 @@ -vars: - quay_registry: quay.io/mongodb/mongodb-enterprise-ops-manager - s3_bucket: s3://enterprise-operator-dockerfiles/dockerfiles/mongodb-enterprise-ops-manager - om_registry: 268558157000.dkr.ecr.us-east-1.amazonaws.com - -images: -- name: ops-manager - vars: - context: . 
- template_context: docker/mongodb-enterprise-ops-manager - platform: linux/amd64 - - stages: - - name: ops-manager-context - task_type: docker_build - dockerfile: docker/mongodb-enterprise-ops-manager/Dockerfile.builder - output: - - registry: $(inputs.params.registry)/ops-manager-context - tag: $(inputs.params.version_id) - - - name: ops-manager-template-ubi - task_type: dockerfile_template - template_file_extension: ubi - tags: ["ubi"] - inputs: - - om_download_url - - version - buildargs: - imagebase: $(inputs.params.registry)/ops-manager-context:$(inputs.params.version_id) - output: - - dockerfile: $(functions.tempfile) - - - name: ops-manager-build - task_type: docker_build - dockerfile: $(stages['ops-manager-template-ubi'].outputs[0].dockerfile) - tags: ["ubi"] - buildargs: - imagebase: $(inputs.params.registry)/ops-manager-context:$(inputs.params.version_id) - output: - - registry: $(inputs.params.om_registry)/dev/mongodb-enterprise-ops-manager-ubi - tag: $(inputs.params.version) - - ## Release tasks - - name: ops-manager-template - task_type: dockerfile_template - template_file_extension: ubi - tags: ["ubi", "release"] - inputs: - - om_download_url - - version - output: - - dockerfile: $(inputs.params.s3_bucket)/$(inputs.params.version)/ubi/Dockerfile - - - name: ops-manager-context-release - task_type: tag_image - tags: ["release"] - source: - registry: $(inputs.params.registry)/ops-manager-context - tag: $(inputs.params.version_id) - destination: - - registry: $(inputs.params.quay_registry) - tag: $(inputs.params.version)-context diff --git a/inventories/readiness_probe.yaml b/inventories/readiness_probe.yaml deleted file mode 100644 index c871ac093..000000000 --- a/inventories/readiness_probe.yaml +++ /dev/null @@ -1,62 +0,0 @@ -vars: - quay_registry: quay.io/mongodb/mongodb-kubernetes-readinessprobe - s3_bucket: s3://enterprise-operator-dockerfiles/dockerfiles/mongodb-kubernetes-readinessprobe - -images: - - name: mongodb-kubernetes-readinessprobe - vars: - context: . 
- template_context: docker/mongodb-kubernetes-readinessprobe - platform: linux/$(inputs.params.architecture) - - stages: - - name: readiness-probe-build-context - task_type: docker_build - dockerfile: docker/mongodb-kubernetes-readinessprobe/Dockerfile.builder - tags: ["ubi"] - buildargs: - GOLANG_VERSION: $(inputs.params.golang_version) - TARGETOS: linux - TARGETARCH: $(inputs.params.architecture) - - labels: - quay.expires-after: 48h - - output: - - registry: $(inputs.params.registry)/mongodb-kubernetes-readinessprobe - tag: $(inputs.params.version_id)-context-$(inputs.params.architecture) - - - name: readiness-probe-build-context-release - task_type: docker_build - tags: ["release"] - dockerfile: docker/mongodb-kubernetes-readinessprobe/Dockerfile.builder - buildargs: - GOLANG_VERSION: $(inputs.params.golang_version) - TARGETOS: linux - TARGETARCH: $(inputs.params.architecture) - output: - - registry: $(inputs.params.quay_registry) - tag: $(inputs.params.version)-context-$(inputs.params.architecture) - - - name: readiness-probe-build - task_type: docker_build - tags: ["ubi"] - buildargs: - imagebase: $(inputs.params.registry)/mongodb-kubernetes-readinessprobe:$(inputs.params.version_id)-context-$(inputs.params.architecture) - version: $(inputs.params.version) - dockerfile: docker/mongodb-kubernetes-readinessprobe/Dockerfile.old - - labels: - quay.expires-after: 48h - - output: - - registry: $(inputs.params.registry)/mongodb-kubernetes-readinessprobe - tag: $(inputs.params.version_id)-$(inputs.params.architecture) - - registry: $(inputs.params.registry)/mongodb-kubernetes-readinessprobe - tag: latest-$(inputs.params.architecture) - - - name: readiness-probe-template - task_type: dockerfile_template - tags: ["release"] - output: - - dockerfile: $(inputs.params.s3_bucket)/$(inputs.params.version)/ubi/Dockerfile diff --git a/inventories/test.yaml b/inventories/test.yaml deleted file mode 100644 index c83d96bc3..000000000 --- a/inventories/test.yaml +++ /dev/null @@ -1,17 +0,0 @@ -images: -- name: test - vars: - context: docker/mongodb-kubernetes-tests - platform: linux/amd64 - - stages: - - name: build - task_type: docker_build - dockerfile: Dockerfile - buildargs: - PYTHON_VERSION: $(inputs.params.python_version) - output: - - registry: $(inputs.params.registry)/mongodb-kubernetes-tests - tag: latest - - registry: $(inputs.params.registry)/mongodb-kubernetes-tests - tag: $(inputs.params.version_id) diff --git a/inventories/upgrade_hook.yaml b/inventories/upgrade_hook.yaml deleted file mode 100644 index 0540fb8e8..000000000 --- a/inventories/upgrade_hook.yaml +++ /dev/null @@ -1,62 +0,0 @@ -vars: - quay_registry: quay.io/mongodb/mongodb-kubernetes-operator-version-upgrade-post-start-hook - s3_bucket: s3://enterprise-operator-dockerfiles/dockerfiles/mongodb-kubernetes-operator-version-upgrade-post-start-hook - -images: - - name: mongodb-kubernetes-operator-version-upgrade-post-start-hook - vars: - context: . 
- template_context: docker/mongodb-kubernetes-upgrade-hook - platform: linux/$(inputs.params.architecture) - - stages: - - name: readiness-probe-build-context - task_type: docker_build - dockerfile: docker/mongodb-kubernetes-upgrade-hook/Dockerfile.builder - tags: ["ubi"] - buildargs: - GOLANG_VERSION: $(inputs.params.golang_version) - TARGETOS: linux - TARGETARCH: $(inputs.params.architecture) - - labels: - quay.expires-after: 48h - - output: - - registry: $(inputs.params.registry)/mongodb-kubernetes-operator-version-upgrade-post-start-hook - tag: $(inputs.params.version_id)-context-$(inputs.params.architecture) - - - name: readiness-probe-build-context-release - task_type: docker_build - tags: ["release"] - dockerfile: docker/mongodb-kubernetes-upgrade-hook/Dockerfile.builder - buildargs: - GOLANG_VERSION: $(inputs.params.golang_version) - TARGETOS: linux - TARGETARCH: $(inputs.params.architecture) - output: - - registry: $(inputs.params.quay_registry) - tag: $(inputs.params.version)-context-$(inputs.params.architecture) - - - name: readiness-probe-build - task_type: docker_build - tags: ["ubi"] - buildargs: - imagebase: $(inputs.params.registry)/mongodb-kubernetes-operator-version-upgrade-post-start-hook:$(inputs.params.version_id)-context-$(inputs.params.architecture) - version: $(inputs.params.version) - dockerfile: docker/mongodb-kubernetes-upgrade-hook/Dockerfile.old - - labels: - quay.expires-after: 48h - - output: - - registry: $(inputs.params.registry)/mongodb-kubernetes-operator-version-upgrade-post-start-hook - tag: $(inputs.params.version_id)-$(inputs.params.architecture) - - registry: $(inputs.params.registry)/mongodb-kubernetes-operator-version-upgrade-post-start-hook - tag: latest-$(inputs.params.architecture) - - - name: readiness-probe-template - task_type: dockerfile_template - tags: ["release"] - output: - - dockerfile: $(inputs.params.s3_bucket)/$(inputs.params.version)/ubi/Dockerfile diff --git a/inventory.yaml b/inventory.yaml deleted file mode 100644 index 23690c328..000000000 --- a/inventory.yaml +++ /dev/null @@ -1,100 +0,0 @@ -vars: - registry: - quay_registry: quay.io/mongodb/mongodb-kubernetes - s3_bucket: s3://enterprise-operator-dockerfiles/dockerfiles/mongodb-kubernetes - -images: -- name: mongodb-kubernetes - vars: - context: . 
- template_context: docker/mongodb-kubernetes-operator - platform: linux/$(inputs.params.architecture) - inputs: - - version - - log_automation_config_diff - - architecture - - stages: - - - name: mongodb-kubernetes-context - task_type: docker_build - dockerfile: docker/mongodb-kubernetes-operator/Dockerfile.builder - buildargs: - release_version: $(inputs.params.version) - log_automation_config_diff: $(inputs.params.log_automation_config_diff) - use_race: "false" - TARGETOS: linux - TARGETARCH: $(inputs.params.architecture) - output: - - registry: $(inputs.params.registry)/mongodb-kubernetes - tag: $(inputs.params.version)-context-$(inputs.params.architecture) - - - name: operator-race-context - task_type: docker_build - dockerfile: docker/mongodb-kubernetes-operator/Dockerfile.builder - buildargs: - release_version: $(inputs.params.version) - log_automation_config_diff: $(inputs.params.log_automation_config_diff) - use_race: "true" - TARGETOS: linux - TARGETARCH: $(inputs.params.architecture) - output: - - registry: $(inputs.params.registry)/mongodb-kubernetes - tag: $(inputs.params.version)-context-race-$(inputs.params.architecture) - - - name: operator-template-ubi - task_type: dockerfile_template - distro: ubi - inputs: - - version - - debug - output: - - dockerfile: $(functions.tempfile) - - - name: operator-ubi-build - task_type: docker_build - dockerfile: $(stages['operator-template-ubi'].outputs[0].dockerfile) - buildargs: - imagebase: $(inputs.params.registry)/mongodb-kubernetes:$(inputs.params.version)-context-$(inputs.params.architecture) - output: - - registry: $(inputs.params.registry)/mongodb-kubernetes - tag: $(inputs.params.version)-$(inputs.params.architecture) - - # we don't do multi-arch for race images; so we can just directly release it - - name: operator-ubi-race-build - task_type: docker_build - dockerfile: $(stages['operator-template-ubi'].outputs[0].dockerfile) - buildargs: - imagebase: $(inputs.params.registry)/mongodb-kubernetes:$(inputs.params.version)-context-race-$(inputs.params.architecture) - output: - - registry: $(inputs.params.registry)/mongodb-kubernetes - tag: $(inputs.params.version)-race - - - name: master-latest - task_type: tag_image - tags: [ "master" ] - source: - registry: $(inputs.params.registry)/mongodb-kubernetes - tag: $(inputs.params.version)-$(inputs.params.architecture) - destination: - - registry: $(inputs.params.registry)/mongodb-kubernetes - tag: latest-$(inputs.params.architecture) - - - name: mongodb-kubernetes-release - task_type: tag_image - tags: ["release"] - source: - registry: $(inputs.params.registry)/mongodb-kubernetes - tag: $(inputs.params.version)-context-$(inputs.params.architecture) - destination: - - registry: $(inputs.params.quay_registry) - tag: $(inputs.params.version)-context-$(inputs.params.architecture) - - - name: operator-template-ubi - task_type: dockerfile_template - tags: ["release"] - distro: ubi - inputs: - - version - output: - - dockerfile: $(inputs.params.s3_bucket)/$(inputs.params.version)/ubi/Dockerfile diff --git a/lib/base_logger.py b/lib/base_logger.py index 20fd33627..ff4c2637f 100644 --- a/lib/base_logger.py +++ b/lib/base_logger.py @@ -23,10 +23,3 @@ logger.propagate = False logger.addHandler(stdout_handler) logger.addHandler(stderr_handler) - -# Sonar logger -sonar_logger = logging.getLogger("sonar") -sonar_logger.setLevel(LOGLEVEL) -sonar_logger.propagate = False -sonar_logger.addHandler(stdout_handler) -sonar_logger.addHandler(stderr_handler) diff --git a/lib/sonar/.gitignore 
b/lib/sonar/.gitignore deleted file mode 100644 index cd7ec2375..000000000 --- a/lib/sonar/.gitignore +++ /dev/null @@ -1,11 +0,0 @@ -.DS_Store -.idea -*.log -tmp/ - -*.py[cod] -*.egg -htmlcov - -sonar.egg-info/* -sonar.iml diff --git a/lib/sonar/.pylintrc b/lib/sonar/.pylintrc deleted file mode 100644 index ca7e33165..000000000 --- a/lib/sonar/.pylintrc +++ /dev/null @@ -1,3 +0,0 @@ -[MESSAGES CONTROL] - -disable=missing-docstring,empty-docstring,invalid-name diff --git a/lib/sonar/CODE_OF_CONDUCT.md b/lib/sonar/CODE_OF_CONDUCT.md deleted file mode 100644 index 5fb8baa30..000000000 --- a/lib/sonar/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,6 +0,0 @@ -# Code of Conduct - -This project has adopted the [MongoDB Code of -Conduct](https://www.mongodb.com/community-code-of-conduct). If you see any -violations of the above or have any other concerns or questions please contact -us using the following email alias: community-conduct@mongodb.com. diff --git a/lib/sonar/CONTRIBUTING.md b/lib/sonar/CONTRIBUTING.md deleted file mode 100644 index 4477cfa07..000000000 --- a/lib/sonar/CONTRIBUTING.md +++ /dev/null @@ -1,18 +0,0 @@ -# Contributing - -## Workflow - -MongoDB welcomes community contributions! If you’re interested in making a -contribution to Sonar, please follow the steps below before you start writing -any code: - -1. Sign the [contributor's agreement](http://www.mongodb.com/contributor). This - will allow us to review and accept contributions. -1. Fork the repository on GitHub. -1. Create a branch with a name that briefly describes your feature. -1. Implement your feature or bug fix. -1. Add new cases to `./test` that verify your bug fix or make sure no one - unintentionally breaks your feature in the future and run them with `python - -m pytest`. -1. Add comments around your new code that explain what's happening. -1. Commit and push your changes to your branch then submit a pull request. diff --git a/lib/sonar/LICENSE b/lib/sonar/LICENSE deleted file mode 100644 index 6c13e8ea0..000000000 --- a/lib/sonar/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [2021] [MongoDB Inc.] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/lib/sonar/README.md b/lib/sonar/README.md deleted file mode 100644 index 3966f3d62..000000000 --- a/lib/sonar/README.md +++ /dev/null @@ -1,96 +0,0 @@ -# Sonar 🐳 - -Work with multiple Docker images easily. - -**Sonar is currently Work in Progress!** - -## What is Sonar - -Sonar is a tool that allows you to easily produce, template, build and publish -Dockerfiles and Docker images. It uses a declarative, multi-stage approach to -build Docker images. - -## Quick start - -Sonar can be used as a Python module or as a standalone program. Sonar will look -for an `inventory.yaml` file in your local directory that should contain a -collection of images to build and stages for each one of those images. A -different inventory file can be specified using `--inventory <file>`. - -Sonar comes with an inventory file that can build Sonar itself and run its -unit tests. 
That inventory, [simple.yaml](inventories/simple.yaml), looks like this: - -``` yaml -vars: - # start a local registry with: - # docker run -d -p 5000:5000 --restart=always --name registry registry:2 - registry: localhost:5000 - -images: -- name: sonar-test-runner - - vars: - context: . - - # First stage builds a Docker image. The resulting image will be - # pushed to the registry in the `output` section. - stages: - - name: build-sonar-tester-image - task_type: docker_build - - dockerfile: docker/Dockerfile - - output: - - registry: $(inputs.params.registry)/sonar-tester-image - tag: $(inputs.params.version_id) - - # Second stage pushes the previously built image into a new - # registry. - - name: tag-image - task_type: tag_image - - source: - registry: $(stages['build-sonar-tester-image'].outputs[0].registry) - tag: $(stages['build-sonar-tester-image'].outputs[0].tag) - - destination: - - registry: $(inputs.params.registry)/sonar-tester-image - tag: latest -``` - -To execute this inventory file, you can do: - -``` -$ python sonar.py --image sonar-test-runner --inventory inventories/simple.yaml - -[build-sonar-tester-image/docker_build] stage-started build-sonar-tester-image: 1/2 -[build-sonar-tester-image/docker_build] docker-image-push: localhost:5000/sonar-tester-image:8945563b-248e-4c03-bb0a-6cc15cff1a6e -[tag-image/tag_image] stage-started tag-image: 2/2 -[tag-image/tag_image] docker-image-push: localhost:5000/sonar-tester-image:latest -``` - -At the end of this phase, you'll have a Docker image tagged as -`localhost:5000/sonar-tester-image:latest` that you will be able to run with: - -``` -$ docker run localhost:5000/sonar-tester-image:latest -============================= test session starts ============================== -platform linux -- Python 3.9.4, pytest-6.2.4, py-1.10.0, pluggy-0.13.1 -rootdir: /src -collected 38 items - -test/test_build.py ... [ 7%] -test/test_context.py ......x..... [ 39%] -test/test_sign_image.py .. [ 44%] -test/test_sonar.py ........ [ 65%] -test/test_tag_image.py . [ 68%] -test/test_tags.py ........... [ 97%] -test/test_template.py . [100%] - -======================== 37 passed, 1 xfailed in 0.52s ========================= -``` - - -## Legal - -Sonar is released under the terms of the [Apache2 license](./LICENSE). 
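Editor's note: the removed README only demonstrated the CLI entry point, even though it stated Sonar can also be used as a Python module. As a minimal sketch of what the module-style usage looked like: it assumes `process_image` is the module-level entry point and that its keyword names (`skip_tags`, `include_tags`, `build_args`, `inventory`) line up with the `Context` fields visible in the deleted `lib/sonar/sonar.py` below; treat the exact signature as an assumption, not a documented API.

``` python
# Hedged sketch: driving Sonar as a Python module instead of the CLI.
# `process_image` and its keyword names are assumptions inferred from the
# Context fields in lib/sonar/sonar.py; verify against the real entry point.
from lib.sonar.sonar import process_image

process_image(
    image_name="sonar-test-runner",        # an image entry from the inventory
    inventory="inventories/simple.yaml",   # same file the CLI example uses
    build_args={"registry": "localhost:5000"},  # overrides for inventory vars
    include_tags=["ubi"],                  # only run stages tagged "ubi"
    skip_tags=["release"],                 # skip release-only stages
)
```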
diff --git a/lib/sonar/__init__.py b/lib/sonar/__init__.py deleted file mode 100644 index a6d43718d..000000000 --- a/lib/sonar/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -DCT_ENV_VARIABLE = "DOCKER_CONTENT_TRUST" -DCT_PASSPHRASE = "DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE" diff --git a/lib/sonar/builders/__init__.py b/lib/sonar/builders/__init__.py deleted file mode 100644 index bb000503c..000000000 --- a/lib/sonar/builders/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -class SonarBuildError(Exception): - """Wrapper over docker.errors.BuildError""" - - pass - - -class SonarAPIError(Exception): - """Wrapper over docker.errors.APIError""" - - pass - - -def buildarg_from_dict(args): - if args is None: - return "" - - return " ".join(["--build-arg {}={}".format(k, v) for k, v in args.items()]) - - -def labels_from_dict(args): - if args is None: - return "" - - return " ".join(["--label {}={}".format(k, v) for k, v in args.items()]) diff --git a/lib/sonar/builders/docker.py b/lib/sonar/builders/docker.py deleted file mode 100644 index 14c2bf91a..000000000 --- a/lib/sonar/builders/docker.py +++ /dev/null @@ -1,220 +0,0 @@ -import random -import shutil -import subprocess -from typing import Dict, Optional - -import docker.errors -from opentelemetry import trace - -import docker -from lib.base_logger import logger - -from . import SonarAPIError - -TRACER = trace.get_tracer("evergreen-agent") - - -def docker_client() -> docker.DockerClient: - return docker.client.from_env(timeout=60 * 60 * 24) - - -@TRACER.start_as_current_span("docker_build") -def docker_build( - path: str, - dockerfile: str, - buildargs: Optional[Dict[str, str]] = None, - labels: Optional[Dict[str, str]] = None, - platform: Optional[str] = None, -): - """Builds a docker image.""" - - image_name = "sonar-docker-build-{}".format(random.randint(1, 10000)) - - logger.info("path: {}".format(path)) - logger.info("dockerfile: {}".format(dockerfile)) - logger.info("tag: {}".format(image_name)) - logger.info("buildargs: {}".format(buildargs)) - logger.info("labels: {}".format(labels)) - - try: - # docker build from docker-py has bugs resulting in errors or invalid platform when building with specified --platform=linux/amd64 on M1 - docker_build_cli( - path=path, - dockerfile=dockerfile, - tag=image_name, - buildargs=buildargs, - labels=labels, - platform=platform, - ) - - client = docker_client() - image = client.images.get(image_name) - logger.info("successfully built docker-image, SHA256: {}".format(image.id)) - - span = trace.get_current_span() - span.set_attribute("mck.image.sha256", image.id) - - return image - except docker.errors.APIError as e: - raise SonarAPIError from e - - -def _get_build_log(e: docker.errors.BuildError) -> str: - build_logs = "\n" - for item in e.build_log: - if "stream" not in item: - continue - item_str = item["stream"] - build_logs += item_str - return build_logs - - -def docker_build_cli( - path: str, - dockerfile: str, - tag: str, - buildargs: Optional[Dict[str, str]], - labels: Optional[Dict[str, str]] = None, - platform: Optional[str] = None, -): - dockerfile_path = dockerfile - # if dockerfile is relative it has to be set as relative to context (path) - if not dockerfile_path.startswith("/"): - dockerfile_path = f"{path}/{dockerfile_path}" - - args = get_docker_build_cli_args( - path=path, dockerfile=dockerfile_path, tag=tag, buildargs=buildargs, labels=labels, platform=platform - ) - - args_str = " ".join(args) - logger.info(f"executing cli docker build: {args_str}") - - cp = subprocess.run(args, 
stdout=subprocess.PIPE, stderr=subprocess.PIPE) - if cp.returncode != 0: - raise SonarAPIError(cp.stderr) - - -def get_docker_build_cli_args( - path: str, - dockerfile: str, - tag: str, - buildargs: Optional[Dict[str, str]], - labels: Optional[Dict[str, str]] = None, - platform: Optional[str] = None, -): - # Find docker executable dynamically to work across different environments - docker_cmd = shutil.which("docker") - if docker_cmd is None: - raise Exception("Docker executable not found in PATH") - - args = [docker_cmd, "buildx", "build", "--load", "--progress", "plain", path, "-f", dockerfile, "-t", tag] - if buildargs is not None: - for k, v in buildargs.items(): - args.append("--build-arg") - args.append(f"{k}={v}") - - if labels is not None: - for k, v in labels.items(): - args.append("--label") - args.append(f"{k}={v}") - - if platform is not None: - args.append("--platform") - args.append(platform) - - return args - - -def docker_pull( - image: str, - tag: str, -): - client = docker_client() - - try: - return client.images.pull(image, tag=tag) - except docker.errors.APIError as e: - raise SonarAPIError from e - - -def docker_tag( - image: docker.models.images.Image, - registry: str, - tag: str, -): - try: - return image.tag(registry, tag) - except docker.errors.APIError as e: - raise SonarAPIError from e - - -@TRACER.start_as_current_span("image_exists") -def image_exists(repository, tag): - """Check if a Docker image with the specified tag exists in the repository using efficient HEAD requests.""" - logger.info(f"checking if image {tag} exists in remote repository: {repository}") - - return check_registry_image_exists(repository, tag) - - -def check_registry_image_exists(repository, tag): - """Check if image exists in generic registries using HTTP HEAD requests.""" - import requests - - try: - # Determine registry URL format - parts = repository.split("/") - registry_domain = parts[0] - repository_path = "/".join(parts[1:]) - - # Construct URL for manifest check - url = f"https://{registry_domain}/v2/{repository_path}/manifests/{tag}" - headers = {"Accept": "application/vnd.docker.distribution.manifest.v2+json"} - - # Make HEAD request instead of full manifest retrieval - response = requests.head(url, headers=headers, timeout=3) - return response.status_code == 200 - except Exception as e: - logger.warning(f"Error checking registry for {repository}:{tag}: {e}") - return False - - -@TRACER.start_as_current_span("docker_push") -def docker_push(registry: str, tag: str): - docker_cmd = shutil.which("docker") - if docker_cmd is None: - raise Exception("Docker executable not found in PATH") - - def inner_docker_push(should_raise=False): - - # We can't use docker-py here - # as it doesn't support DOCKER_CONTENT_TRUST - # env variable, which could be needed - cp = subprocess.run( - [docker_cmd, "push", f"{registry}:{tag}"], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - if cp.returncode != 0: - if should_raise: - raise SonarAPIError(cp.stderr) - - return False - - return True - - # We don't want to rebuild context images if they already exist. - # Context images should be published once and remain immutable. - # This is especially important for base image changes like ubi8 to ubi9; we don't want to replace our existing - # agent-ubi8 images with agent-ubi9 images and break older operators. 
- # Instead of doing the hack here, we should either: - # - make sonar aware of context images - # - move the logic out of sonar into pipeline.py, to all the places where we build context images - if "-context" in tag and image_exists(registry, tag): - logger.info(f"Image: {tag} in registry: {registry} already exists, skipping push") - else: - logger.info("Image does not exist remotely or is not a context image, pushing it!") - retries = 3 - while retries >= 0: - if inner_docker_push(retries == 0): - break - retries -= 1 diff --git a/lib/sonar/inventories/inventory-template.yaml b/lib/sonar/inventories/inventory-template.yaml deleted file mode 100644 index 372880709..000000000 --- a/lib/sonar/inventories/inventory-template.yaml +++ /dev/null @@ -1,49 +0,0 @@ -## This is a more complex inventory file. It demonstrates a few features: -## -## 1. A dockerfile can be a Jinja2 template, like `docker/Dockerfile.template` -## 2. This template dockerfile gets rendered into a concrete Dockerfile in a -## temp file on disk, using the $(functions.tempfile) function. -## 3. The name of this tempfile is passed further and used by a subsequent -## stage using the `$(stages['stage-name'].outputs[0].dockerfile)` -## -## To run this inventory, use: -## -## ./sonar.py --image sonar-test-runner --inventory inventories/inventory-template.yaml -## - -vars: - # start a local registry with: - # docker run -d -p 5000:5000 --restart=always --name registry registry:2 - registry: localhost:5000 - -images: -- name: sonar-test-runner - - vars: - template_context: docker - context: . - - # First stage builds a Docker image. The resulting image will be - # pushed to the registry in the `output` section. - stages: - - - name: template-sonar - task_type: dockerfile_template - template_file_extension: 3.10rc # Template will be `Dockerfile.3.10rc` - - output: - # We will use $(functions.tempfile) to use a temporary file. The name of the - # temporary file will have to be accessed using - # `$(stages['stage-name'].outputs[...])` afterwards. - - dockerfile: $(functions.tempfile) - - - name: build-sonar-tester-image - task_type: docker_build - - dockerfile: $(stages['template-sonar'].outputs[0].dockerfile) - - output: - - registry: $(inputs.params.registry)/sonar-template-test - tag: $(inputs.params.version_id) - - registry: $(inputs.params.registry)/sonar-template-test - tag: latest diff --git a/lib/sonar/inventories/simple.yaml b/lib/sonar/inventories/simple.yaml deleted file mode 100644 index 7d69c5a15..000000000 --- a/lib/sonar/inventories/simple.yaml +++ /dev/null @@ -1,35 +0,0 @@ -vars: - # start a local registry with: - # docker run -d -p 5000:5000 --restart=always --name registry registry:2 - registry: localhost:5000 - -images: -- name: sonar-test-runner - - vars: - context: . - - # First stage builds a Docker image. The resulting image will be - # pushed to the registry in the `output` section. - stages: - - name: build-sonar-tester-image - task_type: docker_build - - dockerfile: docker/Dockerfile - - output: - - registry: $(inputs.params.registry)/sonar-tester-image - tag: $(inputs.params.version_id) - - # Second stage pushes the previously built image into a new - # registry. 
diff --git a/lib/sonar/sonar.py b/lib/sonar/sonar.py deleted file mode 100644 index c97b2f0da..000000000 --- a/lib/sonar/sonar.py +++ /dev/null @@ -1,770 +0,0 @@ -""" -sonar/sonar.py - -Implements Sonar's main functionality. -""" - -import json -import os -import re -import subprocess -import tempfile -import uuid -from dataclasses import dataclass, field -from pathlib import Path -from shutil import copyfile -from typing import Any, Dict, List, Optional, Tuple, Union -from urllib.request import urlretrieve - -import boto3 -import click -import yaml - -from lib.base_logger import logger - -from . import DCT_ENV_VARIABLE, DCT_PASSPHRASE -from .builders.docker import ( - SonarAPIError, - docker_build, - docker_pull, - docker_push, - docker_tag, -) -from .template import render - - -# pylint: disable=R0902 -@dataclass -class Context: - """ - Sonar's Execution Context. - - Holds information required for a run, including execution parameters, - the inventory dictionary, and tags (included and excluded). - """ - - inventory: Dict[str, str] - image: Dict[str, Dict] - - # Store parameters passed as arguments - parameters: Dict[str, str] - - skip_tags: List[str] = None - include_tags: List[str] = None - - stage: Dict = None - - # If continue_on_errors is set to true, errors will - # be captured and logged, but will not raise, and will - # not stop future tasks from being executed. - continue_on_errors: bool = True - - # If errors happened during the execution, an exception - # will be raised. This can help in situations where some - # errors were captured (continue_on_errors == True) but - # we still want to fail the overall task. - fail_on_errors: bool = False - - # List of captured errors to report. - captured_errors: List[Exception] = field(default_factory=list) - - # Defines if running in pipeline mode, that is, the output - # is supposed to be consumable by the system calling sonar. - pipeline: bool = False - output: dict = field(default_factory=dict) - - # Generates a version_id to use if one is not present - stored_version_id: str = str(uuid.uuid4()) - - # stage_outputs maps each stage name to a list of key/value - # (str, Any) dicts with the values stored by that stage. - stage_outputs: Dict[str, List[Dict[str, Any]]] = field(default_factory=dict) - - # pylint: disable=C0103 - def I(self, string): - """ - I interpolates variables in string. - """ - return interpolate_vars(self, string, stage=self.stage) - - @property - def image_name(self): - """Returns current image name""" - return self.image["name"] - - @property - def version_id(self): - """Returns the version_id for this run. - - In evergreen context, it corresponds to Evergreen's run version_id; locally - a uuid is used as a way of having independent builds. - """ - return os.environ.get("version_id", self.stored_version_id) - - -def append_output_in_context(ctx: Context, stage_name: str, values: Dict) -> None: - """Stores a value as the output of the stage, so it can be consumed by future stages.""" - if stage_name not in ctx.stage_outputs.keys(): - # adds a new empty list for this stage - # if there isn't one yet. - ctx.stage_outputs[stage_name] = list() - - ctx.stage_outputs[stage_name].append(values)
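append_output_in_context is the hand-off mechanism between stages: one stage appends a dict of outputs, and later stages read it back through the $(stages['name'].outputs[i].key) syntax handled by interpolate_vars below. A small sketch, mirroring the deleted lib/sonar/test/test_context.py:

    ctx = Context(inventory={}, image={"name": "image0"}, parameters={})
    append_output_in_context(ctx, "stage0", {"registry": "localhost:5000/foo", "tag": "latest"})
    assert ctx.I("$(stages['stage0'].outputs[0].tag)") == "latest"
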
- - -def find_inventory(inventory: Optional[str] = None): - """ - Finds the inventory file, and returns it as a yaml object. - """ - if inventory is None: - inventory = "inventory.yaml" - - # pylint: disable=C0103 - with open(inventory, "r") as f: - return yaml.safe_load(f) - - -def find_image(image_name: str, inventory: str): - """ - Looks for an image of the given name in the inventory. - """ - for image in find_inventory(inventory)["images"]: - if image["name"] == image_name: - return image - - raise ValueError("Image {} not found".format(image_name)) - - -def find_variable_replacement(ctx: Context, variable: str, stage=None) -> str: - """ - Returns the variable *value* for this variable. - """ - if variable == "version_id": - return ctx.version_id - - replacement = None - # Find variable value on top level file - if "vars" in ctx.inventory: - if variable in ctx.inventory["vars"]: - replacement = ctx.inventory["vars"][variable] - - # Find top-level defined variable overrides; - # these might not be defined anywhere in the inventory file. - # maybe they should? - if variable in ctx.parameters: - replacement = ctx.parameters[variable] - - # Find variable value on image - if "vars" in ctx.image: - if variable in ctx.image["vars"]: - replacement = ctx.image["vars"][variable] - - # Find variables in stage - if stage is not None and "vars" in stage: - if variable in stage["vars"]: - replacement = stage["vars"][variable] - - # Find variable values on cli parameters - if "inputs" in ctx.image: - if variable in ctx.image["inputs"]: - # If in inputs then we get it from the parameters - replacement = ctx.parameters[variable] - - return replacement - - -def find_variable_replacements(ctx: Context, variables: List[str], stage=None) -> Dict[str, str]: - """ - Finds replacements for a list of variables. - """ - replacements = {} - for variable in variables: - value = find_variable_replacement(ctx, variable, stage) - if value is None: - raise ValueError("No value for variable {}".format(variable)) - - replacements[variable] = value - - return replacements - - -def execute_interpolatable_function(name: str) -> str: - if name == "tempfile": - tmp = tempfile.mkstemp() - # mkstemp returns a tuple, with the second element of it being - # the absolute path to the file. - return tmp[1] - - raise ValueError("Only supported function is 'tempfile'") - - -def find_variables_to_interpolate_from_stage(string: str) -> List[Any]: - """Finds occurrences of $(stages['stage-name'].outputs[i].key).""" - var_finder_re = r"\$\(stages\[\'(?P<stage>[\w-]+)\'\]\.outputs\[(?P<index>\d+)\]\.(?P<key>\w+)" - - return re.findall(var_finder_re, string, re.UNICODE) - - -def find_variables_to_interpolate(string) -> List[str]: - """ - Returns a list of variables in the string that need to be interpolated. - """ - var_finder_re = r"\$\(inputs\.params\.(?P<name>\w+)\)" - return re.findall(var_finder_re, string, re.UNICODE) - - -def find_functions_to_interpolate(string: str) -> List[Any]: - """Find functions to be interpolated.""" - var_finder_re = r"\$\(functions\.(?P<name>\w+)\)" - - return re.findall(var_finder_re, string, re.UNICODE) - - -def interpolate_vars(ctx: Context, string: str, stage=None) -> str: - """ - For each variable to interpolate in string, finds its *value* and - replaces it in the final string.
- """ - variables = find_variables_to_interpolate(string) - replacements = find_variable_replacements(ctx, variables, stage) - - for variable in variables: - string = string.replace("$(inputs.params.{})".format(variable), replacements[variable]) - - variables = find_variables_to_interpolate_from_stage(string) - for stage, index, key in variables: - value = ctx.stage_outputs[stage][int(index)][key] - string = string.replace("$(stages['{}'].outputs[{}].{})".format(stage, index, key), value) - - functions = find_functions_to_interpolate(string) - for name in functions: - value = execute_interpolatable_function(name) - string = string.replace("$(functions.{})".format(name), value) - - return string - - -def build_add_statement(ctx, block) -> str: - """ - DEPRECATED: do not use - """ - stmt = "ADD " - if "from" in block: - stmt += "--from={} ".format(block["from"]) - - src = ctx.I(block["src"]) - dst = ctx.I(block["dst"]) - stmt += "{} {}\n".format(src, dst) - - return stmt - - -def find_docker_context(ctx: Context): - """ - Finds a docker context in multiple places in the inventory, image or stage. - """ - if ctx.stage is not None: - if "vars" in ctx.stage and "context" in ctx.stage["vars"]: - return ctx.stage["vars"]["context"] - - if "dockercontext" in ctx.stage: - return ctx.stage["dockercontext"] - - if "vars" in ctx.image and "context" in ctx.image["vars"]: - return ctx.image["vars"]["context"] - - raise ValueError("No context defined for image or stage") - - -def should_skip_stage(stage: Dict[str, str], skip_tags: List[str]) -> bool: - """ - Checks if this stage should be skipped. - """ - stage_tags = stage.get("tags", []) - if len(stage_tags) == 0: - return False - - return not set(stage_tags).isdisjoint(skip_tags) - - -def should_include_stage(stage: Dict[str, str], include_tags: List[str]) -> bool: - """ - Checks if this stage should be included in the run. If tags is empty, then - all stages should be run, included this one. - """ - stage_tags = stage.get("tags", []) - if len(include_tags) == 0: - # We don't have "include_tags" so all tasks should run - return True - - return not set(stage_tags).isdisjoint(include_tags) - - -def task_dockerfile_create(ctx: Context): - """Writes a simple Dockerfile from SCRATCH and ADD statements. This - is intended to build a 'context' Dockerfile, this is, a Dockerfile that's - not runnable but contains data. - - DEPRECATED: Use dockerfile_template or docker_build instead. 
- """ - output_dockerfile = ctx.I(ctx.stage["output"][0]["dockerfile"]) - fro = ctx.stage.get("from", "scratch") - - # pylint: disable=C0103 - with open("{}".format(output_dockerfile), "w") as fd: - fd.write("FROM {}\n".format(fro)) - for f in ctx.stage["static_files"]: - fd.write(build_add_statement(ctx, f)) - - echo(ctx, "dockerfile-save-location", output_dockerfile) - - -def get_secret(secret_name: str, region: str) -> str: - session = boto3.session.Session() - client = session.client(service_name="secretsmanager", region_name=region) - - get_secret_value_response = client.get_secret_value(SecretId=secret_name) - - return get_secret_value_response.get("SecretString", "") - - -def get_private_key_id(registry: str, signer_name: str) -> str: - cp = subprocess.run( - ["docker", "trust", "inspect", registry], - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - - if cp.returncode != 0: - return SonarAPIError(cp.stderr) - - json_data = json.loads(cp.stdout) - assert len(json_data) != 0 - for signer in json_data[0]["Signers"]: - if signer_name == signer["Name"]: - assert len(signer["Keys"]) != 0 - return signer["Keys"][0]["ID"] + ".key" - - -def task_tag_image(ctx: Context): - """ - Pulls an image from source and pushes into destination. - """ - registry = ctx.I(ctx.stage["source"]["registry"]) - tag = ctx.I(ctx.stage["source"]["tag"]) - - image = docker_pull(registry, tag) - - for output in ctx.stage["destination"]: - registry = ctx.I(output["registry"]) - tag = ctx.I(output["tag"]) - echo( - ctx, - "docker-image-push", - "{}:{}".format(registry, tag), - ) - - docker_tag(image, registry, tag) - create_ecr_repository(registry) - try: - docker_push(registry, tag) - except SonarAPIError as e: - ctx.captured_errors.append(e) - if ctx.continue_on_errors: - echo(ctx, "docker-image-push/error", e) - else: - raise - - append_output_in_context(ctx, ctx.stage["name"], {"registry": registry, "tag": tag}) - - -def get_rendering_params(ctx: Context) -> Dict[str, str]: - """ - Finds rendering parameters for a template, based on the `inputs` section - of the stage. - """ - params = {} - for param in ctx.stage.get("inputs", {}): - params[param] = find_variable_replacement(ctx, param, ctx.stage) - - return params - - -def run_dockerfile_template(ctx: Context, dockerfile_context: str, distro: str) -> str: - """ - Renders a template and returns a file name pointing at the render. - """ - path = dockerfile_context - params = get_rendering_params(ctx) - - logger.debug("rendering params are:") - logger.debug(params) - - rendered = render(path, distro, params) - tmp = tempfile.NamedTemporaryFile(delete=False) - tmp.write(rendered.encode("utf-8")) - - return tmp.name - - -def interpolate_dict(ctx: Context, args: Dict[str, str]) -> Dict[str, str]: - """ - Returns a copy of the provided dictionary with their variables interpolated with values. 
- """ - copied_args = {} - # pylint: disable=C0103 - for k, v in args.items(): - copied_args[k] = ctx.I(v) - - return copied_args - - -def is_valid_ecr_repo(repo_name: str) -> bool: - """Returns true if repo_name is a ECR repository, it expectes - a domain part (*.amazonaws.com) and a repository part (/images/container-x/...).""" - rex = re.compile(r"^[0-9]{10,}\.dkr\.ecr\.[a-z]{2}\-[a-z]+\-[0-9]+\.amazonaws\.com/.+") - return rex.match(repo_name) is not None - - -def create_ecr_repository(tag: str): - """ - Creates ecr repository if it doesn't exist - """ - if not is_valid_ecr_repo(tag): - logger.info("Not an ECR repository: %s", tag) - return - - try: - no_tag = tag.partition(":")[0] - region = no_tag.split(".")[3] - repository_name = no_tag.partition("/")[2] - except IndexError: - logger.debug("Could not parse repository: %s", tag) - return - - logger.debug("Creating repository in %s with name %s", region, repository_name) - - client = boto3.client("ecr", region_name=region) - - try: - client.create_repository( - repositoryName=repository_name, - imageTagMutability="MUTABLE", - imageScanningConfiguration={"scanOnPush": False}, - ) - except client.exceptions.RepositoryAlreadyExistsException: - logger.debug("Repository already exists") - - -def echo(ctx: Context, entry_name: str, message: str, foreground: str = "white"): - """ - Echoes a message. - """ - - err = ctx.pipeline - section = ctx.output - - if ctx.pipeline: - image_name = ctx.image["name"] - if image_name not in ctx.output: - ctx.output[image_name] = {} - section = ctx.output[image_name] - - if ctx.stage is not None: - stage_name = ctx.stage["name"] - if stage_name not in ctx.output[image_name]: - ctx.output[image_name][stage_name] = {} - section = ctx.output[image_name][stage_name] - - section[entry_name] = message - - stage_title = "" - if ctx.stage: - stage_type = ctx.stage["task_type"] - stage_name = ctx.stage["name"] - stage_title = "[{}/{}] ".format(stage_name, stage_type) - - # If --pipeline, these messages go to stderr - - click.secho("{}{}: {}".format(stage_title, entry_name, message), fg=foreground, err=err) - - -def find_dockerfile(dockerfile: str): - """Returns a Dockerfile file location that can be local or remote. 
If remote it - will be downloaded into a temporary location first.""" - - if dockerfile.startswith("https://"): - tmpfile = tempfile.NamedTemporaryFile(delete=False) - urlretrieve(dockerfile, tmpfile.name) - - return tmpfile.name - - return dockerfile - - -def is_signing_enabled(output: Dict) -> bool: - return all( - key in output - for key in ( - "signer_name", - "key_secret_name", - "passphrase_secret_name", - "region", - ) - ) - - -def setup_signing_environment(ctx: Context, output: Dict) -> str: - os.environ[DCT_ENV_VARIABLE] = "1" - os.environ[DCT_PASSPHRASE] = get_secret(ctx.I(output["passphrase_secret_name"]), ctx.I(output["region"])) - # Asks docker trust inspect for the name the private key for the specified signer - # has to have - signing_key_name = get_private_key_id(ctx.I(output["registry"]), ctx.I(output["signer_name"])) - - # And writes the private key stored in the secret to the appropriate path - private_key = get_secret(ctx.I(output["key_secret_name"]), ctx.I(output["region"])) - docker_trust_path = f"{Path.home()}/.docker/trust/private" - Path(docker_trust_path).mkdir(parents=True, exist_ok=True) - with open(f"{docker_trust_path}/{signing_key_name}", "w+") as f: - f.write(private_key) - - return signing_key_name - - -def task_docker_build(ctx: Context): - """ - Builds a container image. - """ - docker_context = find_docker_context(ctx) - - platform = ctx.image.get("platform") - if platform: - platform = ctx.I(platform) - - dockerfile = find_dockerfile(ctx.I(ctx.stage["dockerfile"])) - - buildargs = interpolate_dict(ctx, ctx.stage.get("buildargs", {})) - - labels = interpolate_dict(ctx, ctx.stage.get("labels", {})) - - image = docker_build(docker_context, dockerfile, buildargs=buildargs, labels=labels, platform=platform) - - for output in ctx.stage["output"]: - registry = ctx.I(output["registry"]) - tag = ctx.I(output["tag"]) - sign = is_signing_enabled(output) - signing_key_name = "" - if sign: - signing_key_name = setup_signing_environment(ctx, output) - - echo(ctx, "docker-image-push", "{}:{}".format(registry, tag)) - docker_tag(image, registry, tag) - - create_ecr_repository(registry) - try: - docker_push(registry, tag) - except SonarAPIError as e: - ctx.captured_errors.append(e) - if ctx.continue_on_errors: - echo(ctx, "docker-image-push/error", e) - else: - raise - - append_output_in_context( - ctx, - ctx.stage["name"], - { - "registry": registry, - "tag": tag, - }, - ) - - if sign: - clear_signing_environment(signing_key_name) - - -def split_s3_location(s3loc: str) -> Tuple[str, str]: - if not s3loc.startswith("s3://"): - raise ValueError("{} is not a S3 URL".format(s3loc)) - - bucket, _, location = s3loc.partition("s3://")[2].partition("/") - - return bucket, location - - -def save_dockerfile(dockerfile: str, destination: str): - if destination.startswith("s3://"): - client = boto3.client("s3") - bucket, location = split_s3_location(destination) - client.upload_file(dockerfile, bucket, location, ExtraArgs={"ACL": "public-read"}) - else: - copyfile(dockerfile, destination) - - -def task_dockerfile_template(ctx: Context): - """ - Templates a dockerfile. 
- """ - docker_context = find_docker_context(ctx) - template_context = docker_context - - try: - template_context = ctx.image["vars"]["template_context"] - except KeyError: - pass - - template_file_extension = ctx.stage.get("template_file_extension") - if template_file_extension is None: - # Use distro as compatibility with pre 0.11 - template_file_extension = ctx.stage.get("distro") - - dockerfile = run_dockerfile_template(ctx, template_context, template_file_extension) - - for output in ctx.stage["output"]: - if "dockerfile" in output: - output_dockerfile = ctx.I(output["dockerfile"]) - save_dockerfile(dockerfile, output_dockerfile) - - echo(ctx, "dockerfile-save-location", output_dockerfile) - - append_output_in_context(ctx, ctx.stage["name"], {"dockerfile": output_dockerfile}) - - -def find_skip_tags(params: Optional[Dict[str, str]] = None) -> List[str]: - """Returns a list of tags passed in params that should be excluded from the build.""" - if params is None: - params = {} - - tags = params.get("skip_tags", []) - - if isinstance(tags, str): - tags = [t.strip() for t in tags.split(",") if t != ""] - - return tags - - -def find_include_tags(params: Optional[Dict[str, str]] = None) -> List[str]: - """Returns a list of tags passed in params that should be included in the build.""" - if params is None: - params = {} - - tags = params.get("include_tags", []) - - if isinstance(tags, str): - tags = [t.strip() for t in tags.split(",") if t != ""] - - return tags - - -def clear_signing_environment(key_to_remove: str): - # Note that this is not strictly needed - os.unsetenv(DCT_ENV_VARIABLE) - os.unsetenv(DCT_PASSPHRASE) - os.remove(f"{Path.home()}/.docker/trust/private/{key_to_remove}") - - -# pylint: disable=R0913, disable=R1710 -def process_image( - image_name: str, - skip_tags: Union[str, List[str]], - include_tags: Union[str, List[str]], - build_args: Optional[Dict[str, str]] = None, - inventory: Optional[str] = None, - build_options: Optional[Dict[str, str]] = None, -): - """ - Runs the Sonar process over an image, for an inventory and a set of configurations. - """ - if build_args is None: - build_args = {} - - ctx = build_context(image_name, skip_tags, include_tags, build_args, inventory, build_options) - - echo(ctx, "image_build_start", image_name, foreground="yellow") - - for idx, stage in enumerate(ctx.image.get("stages", [])): - ctx.stage = stage - name = ctx.stage["name"] - task = stage["task_type"] - if should_skip_stage(stage, ctx.skip_tags): - echo(ctx, "skipping-stage", name, foreground="green") - continue - - if not should_include_stage(stage, ctx.include_tags): - echo(ctx, "skipping-stage", name, foreground="green") - continue - - stages_len = len(ctx.image["stages"]) - - echo(ctx, f"stage-started {name} - task-started {task}", f"{idx + 1}/{stages_len}") - - if task == "dockerfile_create": - task_dockerfile_create(ctx) - elif task == "dockerfile_template": - task_dockerfile_template(ctx) - elif task == "docker_build": - task_docker_build(ctx) - elif task == "tag_image": - task_tag_image(ctx) - else: - raise NotImplementedError("task_type {} not supported".format(task)) - - if len(ctx.captured_errors) > 0 and ctx.fail_on_errors: - echo(ctx, "docker-image-push/captured-errors", ctx.captured_errors) - raise SonarAPIError(ctx.captured_errors[0]) - - if ctx.pipeline: - return ctx.output - - -def make_list_of_str(value: Union[None, str, List[str]]) -> List[str]: - """ - Returns a list of strings from multiple different types. 
- """ - if value is None: - return [] - - if isinstance(value, str): - if len(value) == 0: - return [] - - return [e.strip() for e in value.split(",") if e != ""] - - return value - - -def build_context( - image_name: str, - skip_tags: Union[str, List[str]], - include_tags: Union[str, List[str]], - build_args: Optional[Dict[str, str]] = None, - inventory: Optional[str] = None, - build_options: Optional[Dict[str, str]] = None, -) -> Context: - """A Context includes the whole inventory, the image to build, the current stage, - and the `I` interpolation function.""" - image = find_image(image_name, inventory) - - if build_args is None: - build_args = dict() - build_args = build_args.copy() - logger.debug("Should skip tags %s", skip_tags) - - if build_options is None: - build_options = {} - - context = Context( - inventory=find_inventory(inventory), - image=image, - parameters=build_args, - skip_tags=make_list_of_str(skip_tags), - include_tags=make_list_of_str(include_tags), - ) - - for k, v in build_options.items(): - if hasattr(context, k): - setattr(context, k, v) - - return context diff --git a/lib/sonar/template.py b/lib/sonar/template.py deleted file mode 100644 index e92fbb53a..000000000 --- a/lib/sonar/template.py +++ /dev/null @@ -1,20 +0,0 @@ -# -*- coding: utf-8 -*- - -from typing import Dict - -import jinja2 - - -def render(path: str, template_name: str, parameters: Dict[str, str]) -> str: - """Returns a rendered Dockerfile. - - path indicates where in the filesystem the Dockerfiles are. - template_name references a Dockerfile. to render. - """ - env = jinja2.Environment(loader=jinja2.FileSystemLoader(path), undefined=jinja2.StrictUndefined) - - template = "Dockerfile.old" - if template_name is not None: - template = "Dockerfile.{}".format(template_name) - - return env.get_template(template).render(parameters) diff --git a/lib/sonar/test/__init__.py b/lib/sonar/test/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/lib/sonar/test/test_build.py b/lib/sonar/test/test_build.py deleted file mode 100644 index bb1bd848b..000000000 --- a/lib/sonar/test/test_build.py +++ /dev/null @@ -1,131 +0,0 @@ -from types import SimpleNamespace as sn -from unittest.mock import call, mock_open, patch - -from ..builders.docker import get_docker_build_cli_args -from ..sonar import find_dockerfile, process_image - - -@patch("sonar.sonar.docker_push") -@patch("sonar.sonar.docker_tag") -@patch("sonar.sonar.docker_build") -@patch("sonar.sonar.urlretrieve") -@patch("sonar.sonar.create_ecr_repository") -def test_dockerfile_from_url( - patched_docker_build, - patched_docker_tag, - patched_docker_push, - patched_urlretrive, - patched_create_ecr_repository, -): - with open("lib/sonar/test/yaml_scenario6.yaml") as fd: - with patch("builtins.open", mock_open(read_data=fd.read())) as _mock_file: - pipeline = process_image( - image_name="image0", - skip_tags=[], - include_tags=["test_dockerfile_from_url"], - build_args={}, - ) - - patched_urlretrive.assert_called_once() - patched_docker_build.assert_called_once() - patched_docker_tag.assert_called_once() - patched_docker_push.assert_called_once() - patched_create_ecr_repository.assert_called_once() - - -@patch("sonar.sonar.tempfile.NamedTemporaryFile", return_value=sn(name="random-filename")) -@patch("sonar.sonar.urlretrieve") -def test_find_dockerfile_fetches_file_from_url(patched_urlretrieve, patched_tempfile): - # If passed a dockerfile which starts with https:// - # make sure urlretrieve and NamedTemporaryFile is called - dockerfile = 
find_dockerfile("https://something") - - patched_urlretrieve.assert_called_once() - patched_tempfile.assert_called_once_with(delete=False) - assert dockerfile == "random-filename" - - patched_urlretrieve.reset_mock() - - # If dockerfile is a localfile, urlretrieve should not be called. - dockerfile = find_dockerfile("/localfile/somewhere") - patched_urlretrieve.assert_not_called() - assert dockerfile == "/localfile/somewhere" - - -@patch("sonar.sonar.docker_tag") -@patch("sonar.sonar.docker_build") -def test_labels_are_passed_to_docker_build(_docker_build, _docker_tag): - _ = process_image( - image_name="image1", - skip_tags=[], - include_tags=[], - build_args={}, - build_options={}, - inventory="lib/sonar/test/yaml_scenario9.yaml", - ) - - # the labels have been specified in the test scenario and should be passed to docker_build. - calls = [ - call( - ".", - "Dockerfile", - buildargs={}, - labels={"label-0": "value-0", "label-1": "value-1", "label-2": "value-2"}, - platform=None, - ) - ] - - _docker_build.assert_has_calls(calls) - _docker_build.assert_called_once() - - -@patch("sonar.sonar.docker_tag") -@patch("sonar.sonar.docker_build") -def test_platform_is_passed_to_docker_build(_docker_build, _docker_tag): - # platform in image is set - _ = process_image( - image_name="image1", - skip_tags=[], - include_tags=[], - build_args={}, - build_options={}, - inventory="lib/sonar/test/yaml_scenario11.yaml", - ) - - # platform is not specified - _ = process_image( - image_name="image2", - skip_tags=[], - include_tags=[], - build_args={}, - build_options={}, - inventory="lib/sonar/test/yaml_scenario11.yaml", - ) - - calls = [ - call(".", "Dockerfile", buildargs={}, labels={"label-0": "value-0"}, platform="linux/amd64"), - call(".", "Dockerfile", buildargs={}, labels={"label-1": "value-1"}, platform=None), - ] - - _docker_build.assert_has_calls(calls) - _docker_build.assert_called() - - -@patch("sonar.builders.docker.shutil.which", return_value="/mock/path/to/docker") -def test_get_docker_build_cli_args(mock_which): - assert "/mock/path/to/docker buildx build --load --progress plain . -f dockerfile -t image:latest" == " ".join( - get_docker_build_cli_args(".", "dockerfile", "image:latest", None, None, None) - ) - assert ( - "/mock/path/to/docker buildx build --load --progress plain . 
-f dockerfile -t image:latest --build-arg a=1 --build-arg long_arg=long_value --label l1=v1 --label l2=v2 --platform linux/amd64" - == " ".join( - get_docker_build_cli_args( - ".", - "dockerfile", - "image:latest", - {"a": "1", "long_arg": "long_value"}, - {"l1": "v1", "l2": "v2"}, - "linux/amd64", - ) - ) - ) diff --git a/lib/sonar/test/test_context.py b/lib/sonar/test/test_context.py deleted file mode 100644 index ee985ecba..000000000 --- a/lib/sonar/test/test_context.py +++ /dev/null @@ -1,344 +0,0 @@ -# -*- coding: utf-8 -*- - -from unittest.mock import mock_open, patch - -import pytest - -from ..sonar import ( - Context, - append_output_in_context, - build_context, - find_skip_tags, - should_skip_stage, -) - - -# yaml_scenario0 -@pytest.fixture() -def ys0(): - return open("lib/sonar/test/yaml_scenario0.yaml").read() - - -@pytest.fixture() -def cs0(ys0): - with patch("builtins.open", mock_open(read_data=ys0)) as mock_file: - ctx = build_context( - image_name="image0", - skip_tags=[], - include_tags=[], - build_args={}, - ) - ctx.stage = ctx.image["stages"][0] - - return ctx - - -# yaml_scenario1 -@pytest.fixture() -def ys1(): - return open("lib/sonar/test/yaml_scenario1.yaml").read() - - -@pytest.fixture() -def cs1(ys1): - with patch("builtins.open", mock_open(read_data=ys1)) as mock_file: - ctx = build_context( - image_name="image0", - skip_tags=[], - include_tags=[], - build_args={}, - ) - ctx.stage = ctx.image["stages"][0] - - return ctx - - -# yaml_scenario2 -@pytest.fixture() -def ys2(): - return open("lib/sonar/test/yaml_scenario2.yaml").read() - - -@pytest.fixture() -def cs2(ys2): - with patch("builtins.open", mock_open(read_data=ys2)) as mock_file: - ctx = build_context( - image_name="image0", - skip_tags=[], - include_tags=[], - build_args={ - "image_input0": "🐳", - "image_input1": "🎄", - "non_defined_in_inventory": "yes", - }, - ) - ctx.stage = ctx.image["stages"][0] - - return ctx - - -def test_skip_tags(): - params = { - "some": "thing", - "skip_tags": "ubi,rhel", - } - - tags = find_skip_tags(params) - assert len(tags) == 2 - assert tags[0] == "ubi" - assert tags[1] == "rhel" - assert "skip_tags" in params - - tags = find_skip_tags() - assert tags == [] - - params = { - "some": "thing", - "skip_tags": "ubi", - } - - tags = find_skip_tags(params) - assert len(tags) == 1 - assert tags[0] == "ubi" - assert "skip_tags" in params - assert "some" in params - - -def test_should_skip_tags(): - stage = { - "name": "something", - "tags": ["tag0", "tag1"], - } - - assert should_skip_stage(stage, ["tag0"]) - assert should_skip_stage(stage, ["tag1"]) - assert not should_skip_stage(stage, ["another-tag"]) - - stage = { - "name": "something", - } - assert not should_skip_stage(stage, ["tag0"]) - - stage = { - "name": "something", - "tags": ["ubi"], - } - - assert not should_skip_stage(stage, ["ubuntu"]) - - -def test_build_context(cs0): - ctx = cs0 - assert ctx.image_name == "image0" - assert ctx.skip_tags == None - assert ctx.parameters == {} - - -def test_build_context(ys0): - with patch("builtins.open", mock_open(read_data=ys0)) as mock_file: - with pytest.raises(ValueError, match="Image image1 not found"): - build_context(image_name="image1", skip_tags=[], include_tags=[]) - - -def test_variable_interpolation0(cs1): - ctx = cs1 - - assert ctx.I("$(inputs.params.registry)/something") == "somereg/something" - with pytest.raises(KeyError): - ctx.I("$(inputs.params.input0)") - - -def test_variable_interpolation1(cs2): - ctx = cs2 - - # Inventory variables - assert 
ctx.I("$(inputs.params.inventory_var0)") == "inventory_var_value0" - assert ctx.I("$(inputs.params.inventory_var1)") == "inventory_var_value1" - with pytest.raises(ValueError): - ctx.I("$(inputs.params.inventory_var_non_existing)") - - # Parameters passed to function - assert ctx.I("$(inputs.params.image_input0)") == "🐳" - assert ctx.I("$(inputs.params.image_input1)") == "🎄" - with pytest.raises(ValueError): - ctx.I("$(inputs.params.image_input_non_existing)") - - # Image variables - assert ctx.I("$(inputs.params.image_var0)") == "image_var_value0" - assert ctx.I("$(inputs.params.image_var1)") == "image_var_value1" - with pytest.raises(ValueError): - ctx.I("$(inputs.params.image_var_non_existing)") - - # Stage variables - assert ctx.I("$(inputs.params.stage_var0)") == "stage_value0" - assert ctx.I("$(inputs.params.stage_var1)") == "stage_value1" - with pytest.raises(ValueError): - assert ctx.I("$(inputs.params.stage_var_non_existing)") == "stage_value2" - - # Parameters passed but not defined in inventory - assert ctx.I("$(inputs.params.non_defined_in_inventory)") == "yes" - - with pytest.raises(ValueError): - assert ctx.I("$(inputs.params.defined_nowhere)") - - -def test_variable_interpolation_stage_parameters(ys1): - with patch("builtins.open", mock_open(read_data=ys1)) as mock_file: - ctx = build_context( - image_name="image0", - skip_tags=[], - include_tags=[], - build_args={"input0": "value0", "input1": "value1"}, - ) - - ctx.stage = ctx.image["stages"][0] - - assert ctx.I("$(inputs.params.input0)") == "value0" - assert ctx.I("$(inputs.params.input1)") == "value1" - assert ctx.I("$(inputs.params.input0)/$(inputs.params.input1)") == "value0/value1" - assert ctx.I("$(inputs.params.input1)/$(inputs.params.input0)") == "value1/value0" - assert ( - ctx.I("some text $(inputs.params.input1)/$(inputs.params.input0) more text") - == "some text value1/value0 more text" - ) - - assert ctx.I("$(inputs.params.input0) 🐳") == "value0 🐳" - with pytest.raises(ValueError): - ctx.I("$(inputs.params.non_existing)") - - -@pytest.mark.xfail -def test_variable_interpolation_stage_parameters_funny(ys1): - """This test won't work and I'm not sure why: - 1. Maybe parsing the yaml file won't get the same unicode code? - 2. 
Regex won't capture it""" - with patch("builtins.open", mock_open(read_data=ys1)) as mock_file: - ctx = build_context( - image_name="image0", - skip_tags=[], - include_tags=[], - build_args={"🐳": "whale", "🎄": "tree"}, - ) - ctx.stage = ctx.image["stages"][0] - - assert ctx.I("$(inputs.params.🐳)") == "whale" - assert ctx.I("$(inputs.params.🎄)") == "tree" - - -@patch("sonar.sonar.find_image", return_value={}) -def test_build_context_skip_tags_from_str(_patched_find_image): - ctx = build_context( - inventory="lib/sonar/inventories/simple.yaml", - image_name="image-name", - skip_tags="skip0,skip1", - include_tags="included0, included1", - build_args={}, - ) - - assert ctx.skip_tags == ["skip0", "skip1"] - assert ctx.include_tags == ["included0", "included1"] - - -@patch("sonar.sonar.find_image", return_value={}) -def test_build_context_skip_tags_from_empty_str(_patched_find_image): - ctx = build_context( - inventory="lib/sonar/inventories/simple.yaml", - image_name="image-name", - skip_tags="", - include_tags="", - build_args={}, - ) - - assert ctx.skip_tags == [] - assert ctx.include_tags == [] - - -@patch("sonar.sonar.find_inventory", return_value={"images": {"name": "image-name"}}) -@patch("sonar.sonar.find_image", return_value={"name": "image-name"}) -def test_build_context_uses_any_inventory(patched_find_image, patched_find_inventory): - build_context( - image_name="image-name", - skip_tags="", - include_tags="", - build_args={}, - inventory="other-inventory.yaml", - ) - - patched_find_image.assert_called_once_with("image-name", "other-inventory.yaml") - patched_find_inventory.assert_called_once_with("other-inventory.yaml") - - -def test_use_specific_inventory(): - context = build_context( - image_name="image0", - skip_tags="", - include_tags="", - build_args={"input0": "my-value"}, - inventory="lib/sonar/test/yaml_scenario0.yaml", - ) - - assert context.image["name"] == "image0" - assert context.stage is None - - assert context.skip_tags == [] - assert context.include_tags == [] - - assert context.I("$(inputs.params.input0)") == "my-value" - - -def test_can_provide_generic_configuration(): - context = build_context( - image_name="image0", - skip_tags=[], - include_tags=[], - build_args={}, - inventory="lib/sonar/test/yaml_scenario0.yaml", - build_options={"invalid_options": False, "continue_on_errors": False}, - ) - - assert context.continue_on_errors is False - assert not hasattr(context, "invalid_options") - - -def test_can_store_in_context(): - ctx = Context( - inventory={}, - image="some-image", - parameters=[], - ) - - append_output_in_context(ctx, "stage0", {"key0": "value0", "key1": "value1", "key2": "value2"}) - - append_output_in_context(ctx, "stage0", {"key0": "value0", "key1": "value1", "key2": "value2", "key3": "value3"}) - - assert len(ctx.stage_outputs["stage0"]) == 2 - assert len(ctx.stage_outputs["stage0"][0]) == 3 - assert len(ctx.stage_outputs["stage0"][1]) == 4 - - assert ctx.stage_outputs["stage0"][0] == {"key0": "value0", "key1": "value1", "key2": "value2"} - - assert ctx.stage_outputs["stage0"][1] == {"key0": "value0", "key1": "value1", "key2": "value2", "key3": "value3"} - - append_output_in_context(ctx, "stage1", {"key0": "value0", "key1": "value1", "key2": "value2", "key3": "value3"}) - - assert len(ctx.stage_outputs) == 2 - assert len(ctx.stage_outputs["stage1"][0]) == 4 - - assert ctx.stage_outputs["stage1"][0] == {"key0": "value0", "key1": "value1", "key2": "value2", "key3": "value3"} - - assert ctx.I("$(stages['stage0'].outputs[0].key0)") == "value0" - 
assert ctx.I("$(stages['stage0'].outputs[1].key3)") == "value3" - - -def test_stages_output_and_variables(cs2: Context): - ctx = cs2 - append_output_in_context(ctx, "stage0", {"key0": "value0", "key1": "value1", "key2": "value2"}) - - append_output_in_context(ctx, "stage0", {"key0": "value0", "key1": "value1", "key2": "value2", "key3": "value3"}) - - assert ( - ctx.I("$(inputs.params.inventory_var0) -- $(stages['stage0'].outputs[0].key2)") - == "inventory_var_value0 -- value2" - ) - - assert ctx.I("$(inputs.params.image_input0) -- $(stages['stage0'].outputs[1].key3)") == "🐳 -- value3" diff --git a/lib/sonar/test/test_docker.py b/lib/sonar/test/test_docker.py deleted file mode 100644 index 370ed667c..000000000 --- a/lib/sonar/test/test_docker.py +++ /dev/null @@ -1,46 +0,0 @@ -from types import SimpleNamespace -from unittest.mock import Mock, call, patch - -import pytest -from pytest_mock import MockerFixture - -from ..builders import SonarAPIError -from ..builders.docker import docker_push - - -@patch("sonar.builders.docker.shutil.which", return_value="/mock/path/to/docker") -def test_docker_push_is_retried(mock_which, mocker: MockerFixture): - a = SimpleNamespace(returncode=1, stderr="some-error") - sp = mocker.patch("sonar.builders.docker.subprocess") - sp.PIPE = "|PIPE|" - sp.run.return_value = a - - with pytest.raises(SonarAPIError, match="some-error"): - docker_push("reg", "tag") - - # docker push is called 4 times, the last time it is called, it raises an exception - sp.run.assert_has_calls( - [ - call(["/mock/path/to/docker", "push", "reg:tag"], stdout="|PIPE|", stderr="|PIPE|"), - call(["/mock/path/to/docker", "push", "reg:tag"], stdout="|PIPE|", stderr="|PIPE|"), - call(["/mock/path/to/docker", "push", "reg:tag"], stdout="|PIPE|", stderr="|PIPE|"), - call(["/mock/path/to/docker", "push", "reg:tag"], stdout="|PIPE|", stderr="|PIPE|"), - ] - ) - - -@patch("sonar.builders.docker.shutil.which", return_value="/mock/path/to/docker") -def test_docker_push_is_retried_and_works(mock_which, mocker: MockerFixture): - ok = SimpleNamespace(returncode=0) - sp = mocker.patch("sonar.builders.docker.subprocess") - sp.PIPE = "|PIPE|" - sp.run = Mock() - sp.run.return_value = ok - - docker_push("reg", "tag") - - sp.run.assert_called_once_with( - ["/mock/path/to/docker", "push", "reg:tag"], - stdout="|PIPE|", - stderr="|PIPE|", - ) diff --git a/lib/sonar/test/test_sign_image.py b/lib/sonar/test/test_sign_image.py deleted file mode 100644 index 5edd284e0..000000000 --- a/lib/sonar/test/test_sign_image.py +++ /dev/null @@ -1,155 +0,0 @@ -import os -import os.path -from pathlib import Path -from unittest.mock import call, mock_open, patch - -import pytest -from sonar import DCT_ENV_VARIABLE, DCT_PASSPHRASE - -from ..sonar import is_signing_enabled, process_image - - -@pytest.fixture() -def ys7(): - return open("lib/sonar/test/yaml_scenario7.yaml").read() - - -@pytest.fixture() -def ys8(): - return open("lib/sonar/test/yaml_scenario8.yaml").read() - - -@patch("sonar.sonar.get_secret", return_value="SECRET") -@patch("sonar.sonar.get_private_key_id", return_value="abc.key") -@patch("sonar.sonar.clear_signing_environment") -@patch("sonar.sonar.docker_push") -@patch("sonar.sonar.docker_tag") -@patch("sonar.sonar.docker_build") -@patch("sonar.sonar.urlretrieve") -@patch("sonar.sonar.create_ecr_repository") -def test_sign_image( - patched_create_ecr_repository, - patched_urlretrive, - patched_docker_build, - patched_docker_tag, - patched_docker_push, - patched_clear_signing_environment, - 
patched_get_private_key_id, - patched_get_secret, - ys7, -): - with patch("builtins.open", mock_open(read_data=ys7)) as mock_file: - pipeline = process_image( - image_name="image0", - skip_tags=[], - include_tags=[], - build_args={}, - ) - - patched_clear_signing_environment.assert_called_once_with("abc.key") - assert os.environ.get(DCT_ENV_VARIABLE, "0") == "1" - assert os.environ.get(DCT_PASSPHRASE, "0") == "SECRET" - - secret_calls = [ - call("test/kube/passphrase", "us-east-1"), - call("test/kube/secret", "us-east-1"), - ] - - patched_get_secret.assert_has_calls(secret_calls) - patched_get_private_key_id.assert_called_once_with("foo", "evergreen_ci") - - -def test_is_signing_enabled(): - test_cases = [ - { - "input": { - "signer_name": "foo", - "key_secret_name": "key_name", - "passphrase_secret_name": "pass_name", - "region": "us-east-1", - }, - "result": True, - }, - { - "input": { - "key_secret_name": "key_name", - "passphrase_secret_name": "pass_name", - "region": "us-east-1", - }, - "result": False, - }, - { - "input": { - "signer_name": "foo", - "passphrase_secret_name": "pass_name", - "region": "us-east-1", - }, - "result": False, - }, - { - "input": { - "signer_name": "foo", - "key_secret_name": "key_name", - "region": "us-east-1", - }, - "result": False, - }, - { - "input": { - "signer_name": "foo", - "key_secret_name": "key_name", - "passphrase_secret_name": "pass_name", - }, - "result": False, - }, - ] - - for case in test_cases: - assert is_signing_enabled(case["input"]) == case["result"] - - -@patch("sonar.sonar.get_secret", return_value="SECRET") -@patch("sonar.sonar.get_private_key_id", return_value="abc.key") -@patch("sonar.sonar.clear_signing_environment") -@patch("sonar.sonar.docker_push") -@patch("sonar.sonar.docker_tag") -@patch("sonar.sonar.docker_build") -@patch("sonar.sonar.urlretrieve") -@patch("sonar.sonar.create_ecr_repository") -def test_sign_image( - patched_create_ecr_repository, - patched_urlretrive, - patched_docker_build, - patched_docker_tag, - patched_docker_push, - patched_clear_signing_environment, - patched_get_private_key_id, - patched_get_secret, - ys8, -): - with patch("builtins.open", mock_open(read_data=ys8)) as mock_file: - pipeline = process_image( - image_name="image0", - skip_tags=[], - include_tags=[], - build_args={}, - ) - - clear_calls = [call("abc.key"), call("abc.key"), call("abc.key")] - patched_clear_signing_environment.assert_has_calls(clear_calls) - assert os.environ.get(DCT_ENV_VARIABLE, "0") == "1" - assert os.environ.get(DCT_PASSPHRASE, "0") == "SECRET" - - secret_calls = [ - call("test/kube/passphrase", "us-east-1"), - call("test/kube/secret", "us-east-1"), - ] - - patched_get_secret.assert_has_calls(secret_calls) - - private_key_calls = [ - call("foo", "evergreen_ci"), - call("foo2", "evergreen_ci"), - call("foo3", "evergreen_ci_foo"), - ] - patched_get_private_key_id.assert_has_calls(private_key_calls) diff --git a/lib/sonar/test/test_sonar.py b/lib/sonar/test/test_sonar.py deleted file mode 100644 index 9341db5ce..000000000 --- a/lib/sonar/test/test_sonar.py +++ /dev/null @@ -1,167 +0,0 @@ -import logging -from unittest.mock import Mock, call, patch - -import pytest - -from ..sonar import ( - SonarAPIError, - create_ecr_repository, - is_valid_ecr_repo, - process_image, -) - - -@patch("sonar.sonar.find_inventory", return_value={"images": {"name": "image-name"}}) -@patch("sonar.sonar.find_image", return_value={"name": "image-name"}) -def test_specific_inventory(patched_find_image, patched_find_inventory): - process_image( - 
image_name="image-name", - skip_tags=[], - include_tags=[], - build_args={}, - inventory="other-inventory.yaml", - ) - - patched_find_image.assert_called_once_with("image-name", "other-inventory.yaml") - patched_find_inventory.assert_called_once_with("other-inventory.yaml") - - -def test_repo_is_not_ecr(): - repos = ( - "quay.io/some-org/some-repo", - "scan.connect.redhat.com/ospid-10001000100-1000/some-repo", - "docker.io/some-more", - "1.dkr.ecr.us-east-1.amazonaws.com", # needs bigger account number - "1.dkr.ecr.us-east.amazonaws.com", # zone is not defined - ) - for repo in repos: - assert is_valid_ecr_repo(repo) is False - - -def test_repo_is_ecr(): - repos = ( - "123456789012.dkr.ecr.eu-west-1.amazonaws.com/some-other-repo", - "123456789012.dkr.ecr.us-east-1.amazonaws.com/something-else", - ) - for repo in repos: - assert is_valid_ecr_repo(repo) - - -@patch("sonar.sonar.boto3.client") -def test_create_ecr_repository_creates_repo_when_ecr_repo(patched_client: Mock): - returned_client = Mock() - patched_client.return_value = returned_client - - # repository with no tag - create_ecr_repository( - "123456789012.dkr.ecr.eu-west-1.amazonaws.com/some-other-repo", - ) - patched_client.assert_called_once() - returned_client.create_repository.assert_called_once_with( - repositoryName="some-other-repo", - imageTagMutability="MUTABLE", - imageScanningConfiguration={"scanOnPush": False}, - ) - patched_client.reset_mock() - - # repository with a tag - create_ecr_repository( - "123456789012.dkr.ecr.eu-west-1.amazonaws.com/some-other-repo:some-tag", - ) - patched_client.assert_called_once() - returned_client.create_repository.assert_called_once_with( - repositoryName="some-other-repo", - imageTagMutability="MUTABLE", - imageScanningConfiguration={"scanOnPush": False}, - ) - - -@patch("sonar.sonar.boto3.client") -def test_create_ecr_repository_doesnt_create_repo_when_not_ecr_repo( - patched_client: Mock, -): - returned_client = Mock() - patched_client.return_value = returned_client - - create_ecr_repository( - "my-private-repo.com/something", - ) - patched_client.assert_not_called() - - -@patch("sonar.sonar.docker_push") -@patch("sonar.sonar.docker_tag") -@patch("sonar.sonar.docker_build") -def test_continue_on_errors(_docker_build, _docker_tag, mocked_docker_push): - """We'll mock a function that fails on first iteration but succeeds the seconds one.""" - mocked_docker_push.return_value = None - mocked_docker_push.side_effect = ["All ok!", SonarAPIError("fake-error"), "All ok!"] - - pipeline = process_image( - image_name="image1", - skip_tags=[], - include_tags=["test_continue_on_errors"], - build_args={}, - build_options={"pipeline": True, "continue_on_errors": True}, - inventory="lib/sonar/test/yaml_scenario6.yaml", - ) - - # Assert docker_push was called three times, even if one of them failed - assert mocked_docker_push.call_count == 3 - - -@patch("sonar.sonar.docker_push") -@patch("sonar.sonar.docker_tag") -@patch("sonar.sonar.docker_build") -def test_do_not_continue_on_errors(_docker_build, _docker_tag, mocked_docker_push): - mocked_docker_push.return_value = None - mocked_docker_push.side_effect = [ - SonarAPIError("fake-error-should-not-continue"), - "All ok!", - ] - - with pytest.raises(SonarAPIError): - pipeline = process_image( - image_name="image1", - skip_tags=[], - include_tags=["test_continue_on_errors"], - build_args={}, - build_options={ - "pipeline": True, - "continue_on_errors": False, - }, - inventory="lib/sonar/test/yaml_scenario6.yaml", - ) - - # docker_push raised first time, 
only one call expected - assert mocked_docker_push.call_count == 1 - - -@patch("sonar.sonar.docker_push") -@patch("sonar.sonar.docker_tag") -@patch("sonar.sonar.docker_build") -def test_fail_on_captured_errors(_docker_build, _docker_tag, mocked_docker_push): - mocked_docker_push.return_value = None - mocked_docker_push.side_effect = [ - "All ok!", - SonarAPIError("fake-error-should-not-continue"), - "All ok!", - ] - - with pytest.raises(SonarAPIError): - pipeline = process_image( - image_name="image1", - skip_tags=[], - include_tags=["test_continue_on_errors"], - build_args={}, - build_options={ - "pipeline": True, - "continue_on_errors": True, - "fail_on_errors": True, - }, - inventory="lib/sonar/test/yaml_scenario6.yaml", - ) - - # docker_push raised the second time but was allowed to continue; - # even so, process_image still raised at the end! - assert mocked_docker_push.call_count == 3 diff --git a/lib/sonar/test/test_tag_image.py b/lib/sonar/test/test_tag_image.py deleted file mode 100644 index e86226d10..000000000 --- a/lib/sonar/test/test_tag_image.py +++ /dev/null @@ -1,50 +0,0 @@ -from unittest.mock import call, mock_open, patch - -import pytest - -from ..sonar import process_image - - -@pytest.fixture() -def ys4(): - return open("lib/sonar/test/yaml_scenario4.yaml").read() - - -@patch("sonar.sonar.create_ecr_repository") -@patch("sonar.sonar.docker_pull", return_value="123") -@patch("sonar.sonar.docker_tag") -@patch("sonar.sonar.docker_push") -def test_tag_image( - patched_docker_push, - patched_docker_tag, - patched_docker_pull, - patched_create_ecr_repository, - ys4, -): - with patch("builtins.open", mock_open(read_data=ys4)) as mock_file: - pipeline = process_image( - image_name="image0", - skip_tags=[], - include_tags=[], - build_args={}, - ) - - patched_docker_pull.assert_called_once_with("source-registry-0-test_value0", "source-tag-0-test_value1") - - tag_calls = [ - call("123", "dest-registry-0-test_value0", "dest-tag-0-test_value0-test_value1"), - call("123", "dest-registry-1-test_value0", "dest-tag-1-test_value0-test_value1"), - ] - patched_docker_tag.assert_has_calls(tag_calls) - - push_calls = [ - call("dest-registry-0-test_value0", "dest-tag-0-test_value0-test_value1"), - call("dest-registry-1-test_value0", "dest-tag-1-test_value0-test_value1"), - ] - patched_docker_push.assert_has_calls(push_calls) - - create_ecr_calls = [ - call("dest-registry-0-test_value0"), - call("dest-registry-1-test_value0"), - ] - patched_create_ecr_repository.assert_has_calls(create_ecr_calls) diff --git a/lib/sonar/test/test_tags.py b/lib/sonar/test/test_tags.py deleted file mode 100644 index 74a259f9e..000000000 --- a/lib/sonar/test/test_tags.py +++ /dev/null @@ -1,177 +0,0 @@ -from unittest.mock import mock_open, patch - -import pytest - -from ..sonar import ( - find_include_tags, - find_skip_tags, - process_image, - should_include_stage, - should_skip_stage, -) - - -@pytest.fixture() -def ys3(): - return open("lib/sonar/test/yaml_scenario3.yaml").read() - - -def test_include_tags_empty_params(): - assert find_include_tags(None) == [] - assert find_include_tags({}) == [] - assert find_include_tags({"nop": 1}) == [] - - -def test_include_tags_is_list(): - assert find_include_tags({"include_tags": ["1", "2"]}) == ["1", "2"] - assert find_include_tags({"nop": 1, "include_tags": ["1", "2"]}) == ["1", "2"] - - -def test_include_tags_is_str(): - assert find_include_tags({"include_tags": ""}) == [] - assert find_include_tags({"include_tags": "1,2"}) == ["1", "2"] - assert
find_include_tags({"include_tags": "hi"}) == ["hi"] - assert find_include_tags({"include_tags": "hi,"}) == ["hi"] - - -def test_skip_tags0(): - assert find_skip_tags({"skip_tags": ""}) == [] - assert find_skip_tags(None) == [] - assert find_skip_tags({}) == [] - assert find_skip_tags({"nop": 1}) == [] - assert find_skip_tags({"nop": 1, "skip_tags": []}) == [] - - assert find_skip_tags({"nop": 1, "skip_tags": ["1"]}) == ["1"] - assert find_skip_tags({"nop": 1, "skip_tags": ["1", "2"]}) == ["1", "2"] - - assert find_skip_tags({"nop": 1, "skip_tags": "1"}) == ["1"] - assert find_skip_tags({"nop": 1, "skip_tags": "1,2"}) == ["1", "2"] - assert find_skip_tags({"nop": 1, "skip_tags": "1, 2"}) == ["1", "2"] - assert find_skip_tags({"nop": 1, "skip_tags": "1, 2,"}) == ["1", "2"] - - -def test_should_include_stage(): - assert should_include_stage({"tags": ["a", "b"]}, []) - assert should_include_stage({"tags": ["a", "b"]}, ["a"]) - assert should_include_stage({"tags": ["a", "b"]}, ["b"]) - assert should_include_stage({"tags": ["a", "b"]}, ["a", "b"]) - assert should_include_stage({"tags": ["a", "b"]}, ["b", "a"]) - - assert should_include_stage({"tags": ["a", "b"]}, ["a", "c"]) - assert should_include_stage({"tags": ["a", "b"]}, ["b", "c"]) - - assert not should_include_stage({"tags": ["a", "b"]}, ["c"]) - assert not should_include_stage({"tags": ["b"]}, ["c"]) - assert not should_include_stage({"tags": []}, ["c"]) - - -def test_should_skip_stage(): - assert should_skip_stage({"tags": ["a", "b"]}, ["a"]) - assert should_skip_stage({"tags": ["a", "b"]}, ["a", "b"]) - assert should_skip_stage({"tags": ["a", "b"]}, ["a", "b", "c"]) - - assert not should_skip_stage({"tags": []}, []) - assert not should_skip_stage({"tags": []}, ["a"]) - assert not should_skip_stage({"tags": []}, ["a", "b"]) - assert not should_skip_stage({"tags": ["a"]}, ["b"]) - assert not should_skip_stage({"tags": ["a", "b"]}, []) - assert not should_skip_stage({"tags": ["a", "b"]}, ["c"]) - - -@patch("sonar.sonar.docker_push") -@patch("sonar.sonar.docker_tag") -@patch("sonar.sonar.docker_build") -@patch("sonar.sonar.create_ecr_repository") -def test_include_tags_tag0( - _create_ecr_repository, - _docker_build, - _docker_tag, - _docker_push, - ys3, -): - """Only includes the stage with the corresponding tag.""" - - with patch("builtins.open", mock_open(read_data=ys3)) as mock_file: - pipeline = process_image( - image_name="image0", - skip_tags=[], - include_tags=["tag0"], - build_args={}, - build_options={"pipeline": True}, - ) - - assert "skipping-stage" not in pipeline["image0"]["stage0"] - assert pipeline["image0"]["stage1"] == {"skipping-stage": "stage1"} - - -@patch("sonar.sonar.docker_push") -@patch("sonar.sonar.docker_tag") -@patch("sonar.sonar.docker_build") -@patch("sonar.sonar.create_ecr_repository") -def test_include_tags_tag0_tag1(_create_ecr_repository, _docker_build, _docker_tag, _docker_push, ys3): - """Only includes the stage with the corresponding tag.""" - with patch("builtins.open", mock_open(read_data=ys3)) as mock_file: - pipeline = process_image( - image_name="image0", - skip_tags=[], - include_tags=["tag0", "tag1"], - build_args={}, - build_options={"pipeline": True}, - ) - - assert "skipping-stage" not in pipeline["image0"]["stage0"] - assert "skipping-stage" not in pipeline["image0"]["stage1"] - - -@patch("sonar.sonar.docker_push") -@patch("sonar.sonar.docker_tag") -@patch("sonar.sonar.docker_build") -@patch("sonar.sonar.create_ecr_repository") -def test_skip_tags1(_create_ecr_repository, _docker_build, 
_docker_tag, _docker_push, ys3): - """Only includes the stage with the corresponding tag.""" - with patch("builtins.open", mock_open(read_data=ys3)) as mock_file: - pipeline = process_image( - image_name="image0", - skip_tags=["tag0"], - include_tags=[], - build_args={}, - build_options={"pipeline": True}, - ) - - assert pipeline["image0"]["stage0"] == {"skipping-stage": "stage0"} - assert "skipping-stage" not in pipeline["image0"]["stage1"] - - -def test_skip_tags2(ys3): - """Only includes the stage with the corresponding tag.""" - with patch("builtins.open", mock_open(read_data=ys3)) as mock_file: - pipeline = process_image( - image_name="image0", - skip_tags=["tag0", "tag1"], - include_tags=[], - build_args={}, - build_options={"pipeline": True}, - ) - - assert pipeline["image0"]["stage0"] == {"skipping-stage": "stage0"} - assert pipeline["image0"]["stage1"] == {"skipping-stage": "stage1"} - - -@patch("sonar.sonar.docker_push") -@patch("sonar.sonar.docker_tag") -@patch("sonar.sonar.docker_build") -@patch("sonar.sonar.create_ecr_repository") -def test_skip_include_tags(_create_ecr_repository, _docker_build, _docker_tag, _docker_push, ys3): - """Only includes the stage with the corresponding tag.""" - - with patch("builtins.open", mock_open(read_data=ys3)) as mock_file: - pipeline = process_image( - image_name="image0", - skip_tags=["tag0"], - include_tags=["tag1"], - build_args={}, - build_options={"pipeline": True}, - ) - - assert pipeline["image0"]["stage0"] == {"skipping-stage": "stage0"} - assert "skipping-stage" not in pipeline["image0"]["stage1"] diff --git a/lib/sonar/test/test_template.py b/lib/sonar/test/test_template.py deleted file mode 100644 index 410f5ee26..000000000 --- a/lib/sonar/test/test_template.py +++ /dev/null @@ -1,16 +0,0 @@ -from unittest.mock import Mock, patch - -from ..sonar import process_image - - -@patch("sonar.sonar.render", return_value="") -def test_key_error_is_not_raised_on_empty_inputs(patched_render: Mock): - process_image( - image_name="image1", - skip_tags=[], - include_tags=[], - build_args={}, - build_options={}, - inventory="lib/sonar/test/yaml_scenario10.yaml", - ) - patched_render.assert_called() diff --git a/lib/sonar/test/yaml_scenario0.yaml b/lib/sonar/test/yaml_scenario0.yaml deleted file mode 100644 index 820f826e0..000000000 --- a/lib/sonar/test/yaml_scenario0.yaml +++ /dev/null @@ -1,4 +0,0 @@ -images: - - name: image0 - inputs: - - input0 diff --git a/lib/sonar/test/yaml_scenario1.yaml b/lib/sonar/test/yaml_scenario1.yaml deleted file mode 100644 index 442e5c38c..000000000 --- a/lib/sonar/test/yaml_scenario1.yaml +++ /dev/null @@ -1,19 +0,0 @@ -vars: - registry: somereg - -images: - - name: image0 - vars: - context: . - - inputs: - - input0 - - stages: - - name: stage0 - task_type: docker_build - - dockerfile: Dockerfile - output: - - registry: $(inputs.params.registry)/something - tag: something diff --git a/lib/sonar/test/yaml_scenario10.yaml b/lib/sonar/test/yaml_scenario10.yaml deleted file mode 100644 index 8b239f4b3..000000000 --- a/lib/sonar/test/yaml_scenario10.yaml +++ /dev/null @@ -1,17 +0,0 @@ -vars: - registry: somereg - -images: - - name: image1 - vars: - context: . 
- - stages: - - name: stage0 - task_type: dockerfile_template - - - dockerfile: Dockerfile - output: - - registry: $(inputs.params.version_id)/something - tag: something diff --git a/lib/sonar/test/yaml_scenario11.yaml b/lib/sonar/test/yaml_scenario11.yaml deleted file mode 100644 index 7d02b7ff3..000000000 --- a/lib/sonar/test/yaml_scenario11.yaml +++ /dev/null @@ -1,43 +0,0 @@ -vars: - registry: somereg - -images: - - name: image1 - vars: - context: . - - inputs: - - input0 - - platform: linux/amd64 - - stages: - - name: stage0 - task_type: docker_build - - labels: - label-0: value-0 - - dockerfile: Dockerfile - output: - - registry: $(inputs.params.registry)/something - tag: something - - - name: image2 - vars: - context: . - - inputs: - - input0 - - stages: - - name: stage0 - task_type: docker_build - - labels: - label-1: value-1 - - dockerfile: Dockerfile - output: - - registry: $(inputs.params.registry)/something - tag: something diff --git a/lib/sonar/test/yaml_scenario2.yaml b/lib/sonar/test/yaml_scenario2.yaml deleted file mode 100644 index 8f58914cd..000000000 --- a/lib/sonar/test/yaml_scenario2.yaml +++ /dev/null @@ -1,26 +0,0 @@ -vars: - inventory_var0: inventory_var_value0 - inventory_var1: inventory_var_value1 - -images: - - name: image0 - vars: - image_var0: image_var_value0 - image_var1: image_var_value1 - - inputs: - - image_input0 - - image_input1 - - stages: - - name: stage0 - task_type: docker_build - - vars: - stage_var0: stage_value0 - stage_var1: stage_value1 - - dockerfile: Dockerfile - output: - - registry: $(inputs.params.registry)/something - tag: something diff --git a/lib/sonar/test/yaml_scenario3.yaml b/lib/sonar/test/yaml_scenario3.yaml deleted file mode 100644 index 39d5f3af2..000000000 --- a/lib/sonar/test/yaml_scenario3.yaml +++ /dev/null @@ -1,44 +0,0 @@ -vars: - inventory_var0: inventory_var_value0 - inventory_var1: inventory_var_value1 - -images: - - name: image0 - vars: - image_var0: image_var_value0 - image_var1: image_var_value1 - - context: some-context - - inputs: - - image_input0 - - image_input1 - - stages: - - name: stage0 - task_type: docker_build - - vars: - stage_var0: stage_value0 - stage_var1: stage_value1 - - tags: ["tag0"] - - dockerfile: Dockerfile - output: - - registry: some-registry - tag: something - - - name: stage1 - task_type: docker_build - - vars: - stage_var0: stage_value0 - stage_var1: stage_value1 - - tags: ["tag1"] - - dockerfile: Dockerfile - output: - - registry: some-registry - tag: something diff --git a/lib/sonar/test/yaml_scenario4.yaml b/lib/sonar/test/yaml_scenario4.yaml deleted file mode 100644 index e6984f184..000000000 --- a/lib/sonar/test/yaml_scenario4.yaml +++ /dev/null @@ -1,23 +0,0 @@ -vars: - test_var0: test_value0 - test_var1: test_value1 - -images: - - name: image0 - vars: - context: some-context - - stages: - - name: stage0 - task_type: tag_image - - source: - registry: source-registry-0-$(inputs.params.test_var0) - tag: source-tag-0-$(inputs.params.test_var1) - - destination: - - registry: dest-registry-0-$(inputs.params.test_var0) - tag: dest-tag-0-$(inputs.params.test_var0)-$(inputs.params.test_var1) - - - registry: dest-registry-1-$(inputs.params.test_var0) - tag: dest-tag-1-$(inputs.params.test_var0)-$(inputs.params.test_var1) diff --git a/lib/sonar/test/yaml_scenario6.yaml b/lib/sonar/test/yaml_scenario6.yaml deleted file mode 100644 index 888bf2d8d..000000000 --- a/lib/sonar/test/yaml_scenario6.yaml +++ /dev/null @@ -1,46 +0,0 @@ -images: - - name: image0 - vars: - context: some-context - - stages: 
- - name: stage0 - tags: ["test_dockerfile_from_url"] - task_type: docker_build - - dockerfile: https://somedomain/dockerfile - output: - - registry: some-registry - tag: something - - - name: image1 - vars: - context: some-context - - stages: - - name: stage0 - task_type: docker_build - tags: ["test_continue_on_errors"] - - dockerfile: somedockerfile - output: - - registry: some-registry - tag: something - - - name: stage1 - task_type: docker_build - tags: ["test_continue_on_errors"] - - dockerfile: somedockerfile - output: - - registry: some-registry - tag: something - - - name: stage2 - task_type: docker_build - tags: ["test_continue_on_errors"] - - dockerfile: somedockerfile - output: - - registry: some-registry - tag: something diff --git a/lib/sonar/test/yaml_scenario7.yaml b/lib/sonar/test/yaml_scenario7.yaml deleted file mode 100644 index 3a36d68ba..000000000 --- a/lib/sonar/test/yaml_scenario7.yaml +++ /dev/null @@ -1,15 +0,0 @@ -images: -- name: image0 - vars: - context: some-context - stages: - - name: stage-build0 - task_type: docker_build - dockerfile: https://somedomain/dockerfile - output: - - registry: foo - tag: bar - signer_name: evergreen_ci - key_secret_name: test/kube/secret - passphrase_secret_name: test/kube/passphrase - region: us-east-1 diff --git a/lib/sonar/test/yaml_scenario8.yaml b/lib/sonar/test/yaml_scenario8.yaml deleted file mode 100644 index d2656122c..000000000 --- a/lib/sonar/test/yaml_scenario8.yaml +++ /dev/null @@ -1,27 +0,0 @@ -images: -- name: image0 - vars: - context: some-context - stages: - - name: stage-build0 - task_type: docker_build - dockerfile: https://somedomain/dockerfile - - signing: &signing - signer_name: evergreen_ci - key_secret_name: test/kube/secret - passphrase_secret_name: test/kube/passphrase - region: us-east-1 - - - output: - - registry: foo - tag: bar - <<: *signing - - registry: foo2 - tag: bar2 - <<: *signing - - registry: foo3 - tag: bar3 - <<: *signing - signer_name: evergreen_ci_foo diff --git a/lib/sonar/test/yaml_scenario9.yaml b/lib/sonar/test/yaml_scenario9.yaml deleted file mode 100644 index 1439d52a6..000000000 --- a/lib/sonar/test/yaml_scenario9.yaml +++ /dev/null @@ -1,24 +0,0 @@ -vars: - registry: somereg - -images: - - name: image1 - vars: - context: . - - inputs: - - input0 - - stages: - - name: stage0 - task_type: docker_build - - labels: - label-0: value-0 - label-1: value-1 - label-2: value-2 - - dockerfile: Dockerfile - output: - - registry: $(inputs.params.registry)/something - tag: something diff --git a/pipeline.py b/pipeline.py deleted file mode 100755 index 5156992b1..000000000 --- a/pipeline.py +++ /dev/null @@ -1,1506 +0,0 @@ -#!/usr/bin/env python3 - -"""This pipeline script knows about the details of our Docker images -and where to fetch and calculate parameters. 
It uses Sonar.py -to produce the final images.""" - -import argparse -import copy -import json -import os -import random -import shutil -import subprocess -import sys -import tarfile -import time -import traceback -from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor -from dataclasses import dataclass -from datetime import datetime, timedelta, timezone -from queue import Queue -from typing import Callable, Dict, Iterable, List, Optional, Set, Tuple, Union - -import requests -import semver -from opentelemetry import context -from opentelemetry import context as otel_context -from opentelemetry import trace -from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( - OTLPSpanExporter as OTLPSpanGrpcExporter, -) -from opentelemetry.sdk.resources import SERVICE_NAME, Resource -from opentelemetry.sdk.trace import ( - SynchronousMultiSpanProcessor, - Tracer, - TracerProvider, -) -from opentelemetry.sdk.trace.export import BatchSpanProcessor -from opentelemetry.trace import NonRecordingSpan, SpanContext, TraceFlags -from packaging.version import Version - -import docker -from lib.base_logger import logger -from lib.sonar.sonar import process_image -from scripts.evergreen.release.agent_matrix import ( - get_supported_version_for_image, -) -from scripts.evergreen.release.sbom import generate_sbom, generate_sbom_for_cli -from scripts.release.agent.detect_ops_manager_changes import detect_ops_manager_changes -from scripts.release.build.image_signing import ( - mongodb_artifactory_login, - sign_image, - verify_signature, -) - -TRACER = trace.get_tracer("evergreen-agent") - - -def _setup_tracing(): - trace_id = os.environ.get("otel_trace_id") - parent_id = os.environ.get("otel_parent_id") - endpoint = os.environ.get("otel_collector_endpoint") - if any(value is None for value in [trace_id, parent_id, endpoint]): - logger.info("tracing environment variables are missing, not configuring tracing") - return - logger.info(f"parent_id is {parent_id}") - logger.info(f"trace_id is {trace_id}") - logger.info(f"endpoint is {endpoint}") - span_context = SpanContext( - trace_id=int(trace_id, 16), - span_id=int(parent_id, 16), - is_remote=False, - # This flag ensures the span is sampled and sent to the collector - trace_flags=TraceFlags(0x01), - ) - ctx = trace.set_span_in_context(NonRecordingSpan(span_context)) - context.attach(ctx) - sp = SynchronousMultiSpanProcessor() - span_processor = BatchSpanProcessor( - OTLPSpanGrpcExporter( - endpoint=endpoint, - ) - ) - sp.add_span_processor(span_processor) - resource = Resource(attributes={SERVICE_NAME: "evergreen-agent"}) - provider = TracerProvider(resource=resource, active_span_processor=sp) - trace.set_tracer_provider(provider) - - -DEFAULT_IMAGE_TYPE = "ubi" -DEFAULT_NAMESPACE = "default" - -# QUAY_REGISTRY_URL sets the base registry for all release build stages. Context images and daily builds will push the -# final images to the registry specified here. -# This makes it easy to use ECR to test changes on the pipeline before pushing to Quay. 
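As a concrete illustration of that override (the dev ECR namespace below is the one referenced elsewhere in this file; the image name and tag are made up), a patch run could exercise the release stages against ECR like this:

    import os

    # Hypothetical override: route release pushes to a dev ECR namespace instead of Quay.
    os.environ["QUAY_REGISTRY"] = "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev"

    registry = os.environ.get("QUAY_REGISTRY", "quay.io/mongodb")
    print(f"{registry}/mongodb-kubernetes-init-appdb:1.2.3")  # where a release stage would now push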
-QUAY_REGISTRY_URL = os.environ.get("QUAY_REGISTRY", "quay.io/mongodb") - - -@dataclass -class BuildConfiguration: - image_type: str - base_repository: str - namespace: str - - include_tags: list[str] - skip_tags: list[str] - - builder: str = "docker" - parallel: bool = False - parallel_factor: int = 0 - architecture: Optional[List[str]] = None - sign: bool = False - all_agents: bool = False - agent_to_build: str = "" - - pipeline: bool = True - debug: bool = True - - def build_args(self, args: Optional[Dict[str, str]] = None) -> Dict[str, str]: - if args is None: - args = {} - args = args.copy() - - args["registry"] = self.base_repository - - return args - - def get_skip_tags(self) -> list[str]: - return make_list_of_str(self.skip_tags) - - def get_include_tags(self) -> list[str]: - return make_list_of_str(self.include_tags) - - def is_release_step_executed(self) -> bool: - if "release" in self.get_skip_tags(): - return False - if "release" in self.get_include_tags(): - return True - return len(self.get_include_tags()) == 0 - - -def make_list_of_str(value: Union[None, str, List[str]]) -> List[str]: - if value is None: - return [] - - if isinstance(value, str): - return [e.strip() for e in value.split(",")] - - return value - - -def get_tools_distro(tools_version: str) -> Dict[str, str]: - new_rhel_tool_version = "100.10.0" - default_distro = {"arm": "rhel90-aarch64", "amd": "rhel90-x86_64"} - if Version(tools_version) >= Version(new_rhel_tool_version): - return {"arm": "rhel93-aarch64", "amd": "rhel93-x86_64"} - return default_distro - - -def operator_build_configuration( - builder: str, - parallel: bool, - debug: bool, - architecture: Optional[List[str]] = None, - sign: bool = False, - all_agents: bool = False, - parallel_factor: int = 0, - agent_to_build: str = "", -) -> BuildConfiguration: - bc = BuildConfiguration( - image_type=os.environ.get("distro", DEFAULT_IMAGE_TYPE), - base_repository=os.environ["BASE_REPO_URL"], - namespace=os.environ.get("namespace", DEFAULT_NAMESPACE), - skip_tags=make_list_of_str(os.environ.get("skip_tags")), - include_tags=make_list_of_str(os.environ.get("include_tags")), - builder=builder, - parallel=parallel, - all_agents=all_agents or bool(os.environ.get("all_agents", False)), - debug=debug, - architecture=architecture, - sign=sign, - parallel_factor=parallel_factor, - agent_to_build=agent_to_build, - ) - - logger.info(f"is_running_in_patch: {is_running_in_patch()}") - logger.info(f"is_running_in_evg_pipeline: {is_running_in_evg_pipeline()}") - if is_running_in_patch() or not is_running_in_evg_pipeline(): - logger.info( - f"Running build not in evg pipeline (is_running_in_evg_pipeline={is_running_in_evg_pipeline()}) " - f"or in pipeline but not from master (is_running_in_patch={is_running_in_patch()}). " - "Adding 'master' tag to skip to prevent publishing to the latest dev image." - ) - bc.skip_tags.append("master") - - return bc - - -def is_running_in_evg_pipeline(): - return os.getenv("RUNNING_IN_EVG", "") == "true" - - -class MissingEnvironmentVariable(Exception): - pass - - -def should_pin_at() -> Optional[Tuple[str, str]]: - """Gets the value of the pin_tag_at to tag the images with. - - Returns its value split on :. 
- """ - # We need to return something so `partition` does not raise - # AttributeError - is_patch = is_running_in_patch() - - try: - pinned = os.environ["pin_tag_at"] - except KeyError: - raise MissingEnvironmentVariable(f"pin_tag_at environment variable does not exist, but is required") - if is_patch: - if pinned == "00:00": - raise Exception("Pinning to midnight during a patch is not supported. Please pin to another date!") - - hour, _, minute = pinned.partition(":") - return hour, minute - - -def is_running_in_patch(): - is_patch = os.environ.get("is_patch") - return is_patch is not None and is_patch.lower() == "true" - - -def build_id() -> str: - """Returns the current UTC time in ISO8601 date format. - - If running in Evergreen and `created_at` expansion is defined, use the - datetime defined in that variable instead. - - It is possible to pin this time at midnight (00:00) for periodic builds. If - running a manual build, then the Evergreen `pin_tag_at` variable needs to be - set to the empty string, in which case, the image tag suffix will correspond - to the current timestamp. - - """ - - date = datetime.now(timezone.utc) - try: - created_at = os.environ["created_at"] - date = datetime.strptime(created_at, "%y_%m_%d_%H_%M_%S") - except KeyError: - pass - - hour, minute = should_pin_at() - if hour and minute: - logger.info(f"we are pinning to, hour: {hour}, minute: {minute}") - date = date.replace(hour=int(hour), minute=int(minute), second=0) - else: - logger.warning(f"hour and minute cannot be extracted from provided pin_tag_at env, pinning to now") - - string_time = date.strftime("%Y%m%dT%H%M%SZ") - - return string_time - - -def get_release() -> Dict: - with open("release.json") as release: - return json.load(release) - - -def get_git_release_tag() -> str: - """Returns the git tag of the current run on releases, on non-release returns the patch id.""" - release_env_var = os.getenv("triggered_by_git_tag") - - # that means we are in a release and only return the git_tag; otherwise we want to return the patch_id - # appended to ensure the image created is unique and does not interfere - if release_env_var is not None: - logger.info(f"git tag detected: {release_env_var}") - return release_env_var - - patch_id = os.environ.get("version_id", "latest") - logger.info(f"No git tag detected, using patch_id: {patch_id}") - return patch_id - - -def copy_into_container(client, src, dst): - """Copies a local file into a running container.""" - - os.chdir(os.path.dirname(src)) - srcname = os.path.basename(src) - with tarfile.open(src + ".tar", mode="w") as tar: - tar.add(srcname) - - name, dst = dst.split(":") - container = client.containers.get(name) - - with open(src + ".tar", "rb") as fd: - container.put_archive(os.path.dirname(dst), fd.read()) - - -def create_and_push_manifest(image: str, tag: str, architectures: list[str]) -> None: - """ - Generates docker manifests by running the following commands: - 1. Clear existing manifests - docker manifest rm config.repo_url/image:tag - 2. Create the manifest - docker manifest create config.repo_url/image:tag --amend config.repo_url/image:tag-amd64 --amend config.repo_url/image:tag-arm64 - 3. Push the manifest - docker manifest push config.repo_url/image:tag - - This method calls docker directly on the command line, this is different from the rest of the code which uses - Sonar as an interface to docker. We decided to keep this asymmetry for now, as Sonar will be removed soon. 
- """ - docker_cmd = shutil.which("docker") - if docker_cmd is None: - raise Exception("Docker executable not found in PATH") - - final_manifest = image + ":" + tag - - args = [ - docker_cmd, - "manifest", - "create", - final_manifest, - ] - - for arch in architectures: - args.extend(["--amend", f"{final_manifest}-{arch}"]) - - args_str = " ".join(args) - logger.debug(f"creating new manifest: {args_str}") - cp = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - - if cp.returncode != 0: - raise Exception(cp.stderr) - - args = [docker_cmd, "manifest", "push", final_manifest] - args_str = " ".join(args) - logger.info(f"pushing new manifest: {args_str}") - cp = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - - if cp.returncode != 0: - raise Exception(cp.stderr) - - -def try_get_platform_data(client, image): - """Helper function to try and retrieve platform data.""" - try: - return client.images.get_registry_data(image) - except Exception as e: - logger.error("Failed to get registry data for image: {0}. Error: {1}".format(image, str(e))) - return None - - -def check_multi_arch(image: str, suffix: str) -> bool: - """ - Checks if a docker image supports AMD and ARM platforms by inspecting the registry data. - - :param str image: The image name and tag - """ - client = docker.from_env() - platforms = ["linux/amd64", "linux/arm64"] - - for img in [image, image + suffix]: - reg_data = try_get_platform_data(client, img) - if reg_data is not None and all(reg_data.has_platform(p) for p in platforms): - logger.info("Base image {} supports multi architecture, building for ARM64 and AMD64".format(img)) - return True - - logger.info("Base image {} is single-arch, building only for AMD64.".format(img)) - return False - - -@TRACER.start_as_current_span("sonar_build_image") -def sonar_build_image( - image_name: str, - build_configuration: BuildConfiguration, - args: Dict[str, str] = None, - inventory="inventory.yaml", - with_sbom: bool = True, -): - """Calls sonar to build `image_name` with arguments defined in `args`.""" - span = trace.get_current_span() - span.set_attribute("mck.image_name", image_name) - span.set_attribute("mck.inventory", inventory) - if args: - span.set_attribute("mck.build_args", str(args)) - - build_options = { - # Will continue building an image if it finds an error. See next comment. - "continue_on_errors": True, - # But will still fail after all the tasks have completed - "fail_on_errors": True, - "pipeline": build_configuration.pipeline, - } - - logger.info(f"Sonar config bc: {build_configuration}, args: {args}, for image: {image_name}") - - process_image( - image_name, - skip_tags=build_configuration.get_skip_tags(), - include_tags=build_configuration.get_include_tags(), - build_args=build_configuration.build_args(args), - inventory=inventory, - build_options=build_options, - ) - - if with_sbom: - produce_sbom(build_configuration, args) - - -@TRACER.start_as_current_span("produce_sbom") -def produce_sbom(build_configuration, args): - span = trace.get_current_span() - if not is_running_in_evg_pipeline(): - logger.info("Skipping SBOM Generation (enabled only for EVG)") - return - - try: - image_pull_spec = args["quay_registry"] + args.get("ubi_suffix", "") - except KeyError: - logger.error(f"Could not find image pull spec. 
Args: {args}, BuildConfiguration: {build_configuration}") - logger.error(f"Skipping SBOM generation") - return - - try: - image_tag = args["release_version"] - span.set_attribute("mck.release_version", image_tag) - except KeyError: - logger.error(f"Could not find image tag. Args: {args}, BuildConfiguration: {build_configuration}") - logger.error(f"Skipping SBOM generation") - return - - image_pull_spec = f"{image_pull_spec}:{image_tag}" - print(f"Producing SBOM for image: {image_pull_spec} args: {args}") - - if "platform" in args: - if args["platform"] == "arm64": - platform = "linux/arm64" - elif args["platform"] == "amd64": - platform = "linux/amd64" - else: - # TODO: return here? - logger.error(f"Unrecognized architectures in {args}. Skipping SBOM generation") - else: - platform = "linux/amd64" - - generate_sbom(image_pull_spec, platform) - - -def build_tests_image(build_configuration: BuildConfiguration): - """ - Builds image used to run tests. - """ - image_name = "test" - - # helm directory needs to be copied over to the tests docker context. - helm_src = "helm_chart" - helm_dest = "docker/mongodb-kubernetes-tests/helm_chart" - requirements_dest = "docker/mongodb-kubernetes-tests/requirements.txt" - public_src = "public" - public_dest = "docker/mongodb-kubernetes-tests/public" - - # Remove existing directories/files if they exist - shutil.rmtree(helm_dest, ignore_errors=True) - shutil.rmtree(public_dest, ignore_errors=True) - - # Copy directories and files (recursive copy) - shutil.copytree(helm_src, helm_dest) - shutil.copytree(public_src, public_dest) - shutil.copyfile("release.json", "docker/mongodb-kubernetes-tests/release.json") - shutil.copyfile("requirements.txt", requirements_dest) - - python_version = os.getenv("PYTHON_VERSION", "") - if python_version == "": - raise Exception("Missing PYTHON_VERSION environment variable") - - buildargs = dict({"python_version": python_version}) - - sonar_build_image(image_name, build_configuration, buildargs, "inventories/test.yaml") - - -def build_mco_tests_image(build_configuration: BuildConfiguration): - """ - Builds image used to run community tests. - """ - image_name = "community-operator-e2e" - golang_version = os.getenv("GOLANG_VERSION", "1.24") - if golang_version == "": - raise Exception("Missing GOLANG_VERSION environment variable") - - buildargs = dict({"golang_version": golang_version}) - - sonar_build_image(image_name, build_configuration, buildargs, "inventories/mco_test.yaml") - - -TRACER.start_as_current_span("build_operator_image") - - -def build_operator_image(build_configuration: BuildConfiguration): - """Calculates arguments required to build the operator image, and starts the build process.""" - # In evergreen, we can pass test_suffix env to publish the operator to a quay - # repository with a given suffix. 
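A sketch of the intended effect (the exact repository naming is resolved by the build inventory, so the format below is an assumption for illustration):

    # Hypothetical: how a test_suffix of "-test" would shift the target repository.
    test_suffix = "-test"
    target_repo = f"quay.io/mongodb/mongodb-kubernetes{test_suffix}"
    # -> quay.io/mongodb/mongodb-kubernetes-test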
- test_suffix = os.environ.get("test_suffix", "") - log_automation_config_diff = os.environ.get("LOG_AUTOMATION_CONFIG_DIFF", "false") - version = get_git_release_tag() - - # Use only amd64 if we should skip arm64 builds - if should_skip_arm64(build_configuration): - architectures = ["amd64"] - logger.info("Skipping ARM64 builds for operator image as this is running in EVG pipeline as a patch") - else: - architectures = build_configuration.architecture or ["amd64", "arm64"] - - multi_arch_args_list = [] - - for arch in architectures: - arch_args = { - "version": version, - "log_automation_config_diff": log_automation_config_diff, - "test_suffix": test_suffix, - "debug": build_configuration.debug, - "architecture": arch, - } - multi_arch_args_list.append(arch_args) - - logger.info(f"Building Operator args: {multi_arch_args_list}") - - image_name = "mongodb-kubernetes" - - current_span = trace.get_current_span() - current_span.set_attribute("mck.image_name", image_name) - current_span.set_attribute("mck.architecture", architectures) - - build_image_generic( - config=build_configuration, - image_name=image_name, - inventory_file="inventory.yaml", - multi_arch_args_list=multi_arch_args_list, - with_image_base=False, - is_multi_arch=True, - ) - - -def build_database_image(build_configuration: BuildConfiguration): - """ - Builds a new database image. - """ - release = get_release() - version = release["databaseImageVersion"] - args = {"version": version} - build_image_generic(build_configuration, "database", "inventories/database.yaml", args) - - -def build_CLI_SBOM(build_configuration: BuildConfiguration): - if not is_running_in_evg_pipeline(): - logger.info("Skipping SBOM Generation (enabled only for EVG)") - return - - if build_configuration.architecture is None or len(build_configuration.architecture) == 0: - architectures = ["linux/amd64", "linux/arm64", "darwin/arm64", "darwin/amd64"] - elif "arm64" in build_configuration.architecture: - architectures = ["linux/arm64", "darwin/arm64"] - elif "amd64" in build_configuration.architecture: - architectures = ["linux/amd64", "darwin/amd64"] - else: - logger.error(f"Unrecognized architectures {build_configuration.architecture}. Skipping SBOM generation") - return - - release = get_release() - version = release["mongodbOperator"] - - for architecture in architectures: - generate_sbom_for_cli(version, architecture) - - -def build_operator_image_patch(build_configuration: BuildConfiguration): - """This function builds the operator locally and pushes it into an existing - Docker image. This is the fastest way we could imagine to do this.""" - - client = docker.from_env() - # image that we know is where we build the operator. - image_repo = build_configuration.base_repository + "/" + build_configuration.image_type + "/mongodb-kubernetes" - image_tag = "latest" - repo_tag = image_repo + ":" + image_tag - - logger.debug(f"Pulling image: {repo_tag}") - try: - image = client.images.get(repo_tag) - except docker.errors.ImageNotFound: - logger.debug("Operator image does not exist locally. Building it now") - build_operator_image(build_configuration) - return - - logger.debug("Done") - too_old = datetime.now() - timedelta(hours=3) - image_timestamp = datetime.fromtimestamp( - image.history()[0]["Created"] - ) # Layer 0 is the latest added layer to this Docker image. [-1] is the FROM layer. 
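The age check that follows relies on that layer ordering. A standalone sketch of the same freshness test, assuming the docker SDK is installed and the image exists locally (the repository name is a placeholder):

    from datetime import datetime, timedelta

    import docker

    client = docker.from_env()
    image = client.images.get("example/operator:latest")  # placeholder image reference
    # history() lists layers newest-first; "Created" holds a Unix timestamp.
    newest_layer = datetime.fromtimestamp(image.history()[0]["Created"])
    if newest_layer < datetime.now() - timedelta(hours=3):
        print("local image is older than 3 hours, rebuilding from scratch")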
- - if image_timestamp < too_old: - logger.info("Current operator image is too old, will rebuild it completely first") - build_operator_image(build_configuration) - return - - container_name = "mongodb-enterprise-operator" - operator_binary_location = "/usr/local/bin/mongodb-kubernetes-operator" - try: - client.containers.get(container_name).remove() - logger.debug(f"Removed {container_name}") - except docker.errors.NotFound: - pass - - container = client.containers.run(repo_tag, name=container_name, entrypoint="sh", detach=True) - - logger.debug("Building operator with debugging symbols") - subprocess.run(["make", "manager"], check=True, stdout=subprocess.PIPE) - logger.debug("Done building the operator") - - copy_into_container( - client, - os.getcwd() + "/docker/mongodb-kubernetes-operator/content/mongodb-kubernetes-operator", - container_name + ":" + operator_binary_location, - ) - - # Commit changes on disk as a tag - container.commit( - repository=image_repo, - tag=image_tag, - ) - # Stop this container so we can use it next time - container.stop() - container.remove() - - logger.info("Pushing operator to {}:{}".format(image_repo, image_tag)) - client.images.push( - repository=image_repo, - tag=image_tag, - ) - - -def get_supported_variants_for_image(image: str) -> List[str]: - return get_release()["supportedImages"][image]["variants"] - - -def image_config( - image_name: str, - name_prefix: str = "mongodb-kubernetes-", - s3_bucket: str = "enterprise-operator-dockerfiles", - ubi_suffix: str = "-ubi", - base_suffix: str = "", -) -> Tuple[str, Dict[str, str]]: - """Generates configuration for an image suitable to be passed - to Sonar. - - It returns a dictionary with registries and S3 configuration.""" - args = { - "quay_registry": "{}/{}{}".format(QUAY_REGISTRY_URL, name_prefix, image_name), - "ecr_registry_ubi": "268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/{}{}".format(name_prefix, image_name), - "s3_bucket_http": "https://{}.s3.amazonaws.com/dockerfiles/{}{}".format(s3_bucket, name_prefix, image_name), - "ubi_suffix": ubi_suffix, - "base_suffix": base_suffix, - } - - return image_name, args - - -def args_for_daily_image(image_name: str) -> Dict[str, str]: - """Returns configuration for an image to be able to be pushed with Sonar. - - This includes the quay_registry and ospid corresponding to RedHat's project id. 
- """ - image_configs = [ - image_config("database", ubi_suffix=""), - image_config("init-appdb", ubi_suffix=""), - image_config("agent", name_prefix="mongodb-enterprise-"), - image_config("init-database", ubi_suffix=""), - image_config("init-ops-manager", ubi_suffix=""), - image_config("mongodb-kubernetes", name_prefix="", ubi_suffix=""), - image_config("ops-manager", name_prefix="mongodb-enterprise-"), - image_config( - image_name="mongodb-kubernetes-operator", - name_prefix="", - s3_bucket="enterprise-operator-dockerfiles", - # community ubi image does not have a suffix in its name - ubi_suffix="", - ), - image_config( - image_name="readinessprobe", - ubi_suffix="", - s3_bucket="enterprise-operator-dockerfiles", - ), - image_config( - image_name="operator-version-upgrade-post-start-hook", - ubi_suffix="", - s3_bucket="enterprise-operator-dockerfiles", - ), - image_config( - image_name="mongodb-agent", - name_prefix="", - s3_bucket="enterprise-operator-dockerfiles", - ubi_suffix="-ubi", - base_suffix="-ubi", - ), - ] - - images = {k: v for k, v in image_configs} - return images[image_name] - - -def is_version_in_range(version: str, min_version: str, max_version: str) -> bool: - """Check if the version is in the range""" - try: - parsed_version = semver.VersionInfo.parse(version) - if parsed_version.prerelease: - logger.info(f"Excluding {version} from range {min_version}-{max_version} because it's a pre-release") - return False - version_without_rc = semver.VersionInfo.finalize_version(parsed_version) - except ValueError: - version_without_rc = version - if min_version and max_version: - return version_without_rc.match(">=" + min_version) and version_without_rc.match("<" + max_version) - return True - - -def get_versions_to_rebuild(supported_versions, min_version, max_version): - # this means we only want to release one version, we cannot rely on the below range function - # since the agent does not follow semver for comparison - if (min_version and max_version) and (min_version == max_version): - return [min_version] - return filter(lambda x: is_version_in_range(x, min_version, max_version), supported_versions) - - -def get_versions_to_rebuild_per_operator_version(supported_versions, operator_version): - """ - This function returns all versions sliced by a specific operator version. - If the input is `onlyAgents` then it only returns agents without the operator suffix. - """ - versions_to_rebuild = [] - - for version in supported_versions: - if operator_version == "onlyAgents": - # 1_ works because we append the operator version via "_", all agents end with "1". 
- if "1_" not in version: - versions_to_rebuild.append(version) - else: - if operator_version in version: - versions_to_rebuild.append(version) - return versions_to_rebuild - - -class TracedThreadPoolExecutor(ThreadPoolExecutor): - """Implementation of :class:ThreadPoolExecutor that will pass context into sub tasks.""" - - def __init__(self, tracer: Tracer, *args, **kwargs): - self.tracer = tracer - super().__init__(*args, **kwargs) - - def with_otel_context(self, c: otel_context.Context, fn: Callable): - otel_context.attach(c) - return fn() - - def submit(self, fn, *args, **kwargs): - """Submit a new task to the thread pool.""" - - # get the current otel context - c = otel_context.get_current() - if c: - return super().submit( - lambda: self.with_otel_context(c, lambda: fn(*args, **kwargs)), - ) - else: - return super().submit(lambda: fn(*args, **kwargs)) - - -def should_skip_arm64(config: BuildConfiguration) -> bool: - """ - Determines if arm64 builds should be skipped based on environment. - Determines if arm64 builds should be skipped based on BuildConfiguration or environment.``` - And skipping the evergreen detail. - """ - if config.is_release_step_executed(): - return False - - return is_running_in_evg_pipeline() and is_running_in_patch() - - -def build_image_daily( - image_name: str, # corresponds to the image_name in the release.json - min_version: str = None, - max_version: str = None, - operator_version: str = None, -): - """ - Starts the daily build process for an image. This function works for all images we support, for community and - enterprise operator. The list of supported image_name is defined in get_builder_function_for_image_name. - Builds an image for each version listed in ./release.json - The registry used to pull base image and output the daily build is configured in the image_config function, it is passed - as an argument to the inventories/daily.yaml file. - - If the context image supports both ARM and AMD architectures, both will be built. 
- """ - - def get_architectures_set(build_configuration, args): - """Determine the set of architectures to build for""" - arch_set = set(build_configuration.architecture) if build_configuration.architecture else set() - if arch_set == {"arm64"}: - raise ValueError("Building for ARM64 only is not supported yet") - - if should_skip_arm64(build_configuration): - logger.info("Skipping ARM64 builds as this is running in as a patch and not a release step.") - return {"amd64"} - - # Automatic architecture detection is the default behavior if 'arch' argument isn't specified - if arch_set == set(): - if check_multi_arch( - image=args["quay_registry"] + args["ubi_suffix"] + ":" + args["release_version"], - suffix="-context", - ): - arch_set = {"amd64", "arm64"} - else: - # When nothing specified and single-arch, default to amd64 - arch_set = {"amd64"} - - return arch_set - - def create_and_push_manifests(args: dict, architectures: list[str]): - """Create and push manifests for all registries.""" - registries = [args["ecr_registry_ubi"], args["quay_registry"]] - tags = [args["release_version"], args["release_version"] + "-b" + args["build_id"]] - for registry in registries: - for tag in tags: - create_and_push_manifest(registry + args["ubi_suffix"], tag, architectures=architectures) - - def sign_image_concurrently(executor, args, futures, arch=None): - v = args["release_version"] - logger.info(f"Enqueuing signing task for version: {v}") - future = executor.submit(sign_image_in_repositories, args, arch) - futures.append(future) - - @TRACER.start_as_current_span("inner") - def inner(build_configuration: BuildConfiguration): - supported_versions = get_supported_version_for_image(image_name) - variants = get_supported_variants_for_image(image_name) - - args = args_for_daily_image(image_name) - args["build_id"] = build_id() - - completed_versions = set() - - filtered_versions = get_versions_to_rebuild(supported_versions, min_version, max_version) - if operator_version: - filtered_versions = get_versions_to_rebuild_per_operator_version(filtered_versions, operator_version) - - logger.info("Building Versions for {}: {}".format(image_name, filtered_versions)) - - with TracedThreadPoolExecutor(TRACER) as executor: - futures = [] - for version in filtered_versions: - build_configuration = copy.deepcopy(build_configuration) - if build_configuration.include_tags is None: - build_configuration.include_tags = [] - build_configuration.include_tags.extend(variants) - - logger.info("Rebuilding {} with variants {}".format(version, variants)) - args["release_version"] = version - - arch_set = get_architectures_set(build_configuration, args) - span = trace.get_current_span() - span.set_attribute("mck.architectures", str(arch_set)) - span.set_attribute("mck.architectures_numbers", len(arch_set)) - span.set_attribute("mck.release", build_configuration.is_release_step_executed()) - - if version not in completed_versions: - if arch_set == {"amd64", "arm64"}: - # We need to release the non context amd64 and arm64 images first before we can create the sbom - for arch in arch_set: - # Suffix to append to images name for multi-arch (see usage in daily.yaml inventory) - args["architecture_suffix"] = f"-{arch}" - args["platform"] = arch - sonar_build_image( - "image-daily-build", - build_configuration, - args, - inventory="inventories/daily.yaml", - with_sbom=False, - ) - if build_configuration.sign: - sign_image_concurrently(executor, copy.deepcopy(args), futures, arch) - create_and_push_manifests(args, list(arch_set)) - for arch 
in arch_set: - args["architecture_suffix"] = f"-{arch}" - args["platform"] = arch - logger.info(f"Enqueuing SBOM production task for image: {version}") - future = executor.submit(produce_sbom, build_configuration, copy.deepcopy(args)) - futures.append(future) - if build_configuration.sign: - sign_image_concurrently(executor, copy.deepcopy(args), futures) - else: - # No suffix for single arch images - args["architecture_suffix"] = "" - args["platform"] = "amd64" - sonar_build_image( - "image-daily-build", - build_configuration, - args, - inventory="inventories/daily.yaml", - ) - if build_configuration.sign: - sign_image_concurrently(executor, copy.deepcopy(args), futures) - completed_versions.add(version) - - # wait for all signings to be done - logger.info("Waiting for all tasks to complete...") - encountered_error = False - # all the futures contain concurrent sbom and signing tasks - for future in futures: - try: - future.result() - except Exception as e: - logger.error(f"Error in future: {e}") - encountered_error = True - - executor.shutdown(wait=True) - logger.info("All tasks completed.") - - # we execute them concurrently with retries, if one of them eventually fails, we fail the whole task - if encountered_error: - logger.info("Some tasks failed.") - exit(1) - - return inner - - -@TRACER.start_as_current_span("sign_image_in_repositories") -def sign_image_in_repositories(args: Dict[str, str], arch: str = None): - span = trace.get_current_span() - repository = args["quay_registry"] + args["ubi_suffix"] - tag = args["release_version"] - if arch: - tag = f"{tag}-{arch}" - - span.set_attribute("mck.tag", tag) - - sign_image(repository, tag) - verify_signature(repository, tag) - - -def find_om_in_releases(om_version: str, releases: Dict[str, str]) -> Optional[str]: - """ - There are a few alternatives out there that allow for json-path or xpath-type - traversal of Json objects in Python, I don't have time to look for one of - them now but I have to do at some point. - """ - for release in releases: - if release["version"] == om_version: - for platform in release["platform"]: - if platform["package_format"] == "deb" and platform["arch"] == "x86_64": - for package in platform["packages"]["links"]: - if package["name"] == "tar.gz": - return package["download_link"] - return None - - -def get_om_releases() -> Dict[str, str]: - """Returns a dictionary representation of the Json document holdin all the OM - releases. 
- """ - ops_manager_release_archive = ( - "https://info-mongodb-com.s3.amazonaws.com/com-download-center/ops_manager_release_archive.json" - ) - - return requests.get(ops_manager_release_archive).json() - - -def find_om_url(om_version: str) -> str: - """Gets a download URL for a given version of OM.""" - releases = get_om_releases() - - current_release = find_om_in_releases(om_version, releases["currentReleases"]) - if current_release is None: - current_release = find_om_in_releases(om_version, releases["oldReleases"]) - - if current_release is None: - raise ValueError("Ops Manager version {} could not be found".format(om_version)) - - return current_release - - -def build_init_om_image(build_configuration: BuildConfiguration): - release = get_release() - init_om_version = release["initOpsManagerVersion"] - args = {"version": init_om_version} - build_image_generic(build_configuration, "init-ops-manager", "inventories/init_om.yaml", args) - - -def build_om_image(build_configuration: BuildConfiguration): - # Make this a parameter for the Evergreen build - # https://github.com/evergreen-ci/evergreen/wiki/Parameterized-Builds - om_version = os.environ.get("om_version") - if om_version is None: - raise ValueError("`om_version` should be defined.") - - om_download_url = os.environ.get("om_download_url", "") - if om_download_url == "": - om_download_url = find_om_url(om_version) - - args = { - "version": om_version, - "om_download_url": om_download_url, - } - - build_image_generic( - config=build_configuration, - image_name="ops-manager", - inventory_file="inventories/om.yaml", - extra_args=args, - registry_address_override=f"{QUAY_REGISTRY_URL}/mongodb-enterprise-ops-manager", - ) - - -@TRACER.start_as_current_span("build_image_generic") -def build_image_generic( - config: BuildConfiguration, - image_name: str, - inventory_file: str, - extra_args: dict = None, - with_image_base: bool = True, - is_multi_arch: bool = False, - multi_arch_args_list: list = None, - is_run_in_parallel: bool = False, - registry_address_override: str = "", -): - """Build image generic builds context images and is used for triggering release. During releases - it signs and verifies the context image. - The release process uses the daily images build process. - The with_image_base parameter determines whether the image being built should include a base image prefix. 
- When set to True, the function prepends "mongodb-kubernetes-" to the image name - """ - image_base = "" - if with_image_base: - image_base = "mongodb-kubernetes-" - - if not multi_arch_args_list: - multi_arch_args_list = [extra_args or {}] - version = multi_arch_args_list[0].get("version", "") - - if config.is_release_step_executed(): - registry = f"{QUAY_REGISTRY_URL}/{image_base}{image_name}" - else: - registry = f"{config.base_repository}/{image_base}{image_name}" - - if registry_address_override: - registry = registry_address_override - - try: - for args in multi_arch_args_list: # in case we are building multiple architectures - args["quay_registry"] = registry - sonar_build_image(image_name, config, args, inventory_file, False) - if is_multi_arch: - # we only push the manifests of the context images here, - # since daily rebuilds will push the manifests for the proper images later - architectures = [v["architecture"] for v in multi_arch_args_list] - create_and_push_manifest(registry, f"{version}-context", architectures=architectures) - if not config.is_release_step_executed(): - # Normally daily rebuild would create and push the manifests for the non-context images. - # But since we don't run daily rebuilds on ecr image builds, we can do that step instead here. - # We only need to push manifests for multi-arch images. - create_and_push_manifest(registry, version, architectures=architectures) - latest_tag = "latest" - if not is_running_in_patch() and is_running_in_evg_pipeline(): - logger.info(f"Tagging and pushing {registry}:{version} as {latest_tag}") - try: - client = docker.from_env() - source_image = client.images.pull(f"{registry}:{version}") - source_image.tag(registry, latest_tag) - client.images.push(registry, tag=latest_tag) - span = trace.get_current_span() - span.set_attribute("mck.image.push_latest", f"{registry}:{latest_tag}") - logger.info(f"Successfully tagged and pushed {registry}:{latest_tag}") - except docker.errors.DockerException as e: - logger.error(f"Failed to tag/push {latest_tag} image: {e}") - raise - else: - logger.info( - f"Skipping tagging and pushing {registry}:{version} as {latest_tag} tag; is_running_in_patch={is_running_in_patch()}, is_running_in_evg_pipeline={is_running_in_evg_pipeline()}" - ) - - # Sign and verify the context image if on releases if required. - if config.sign and config.is_release_step_executed(): - sign_and_verify_context_image(registry, version) - - span = trace.get_current_span() - span.set_attribute("mck.image.image_name", image_name) - span.set_attribute("mck.image.version", version) - span.set_attribute("mck.image.is_release", config.is_release_step_executed()) - span.set_attribute("mck.image.is_multi_arch", is_multi_arch) - - if config.is_release_step_executed() and version and QUAY_REGISTRY_URL in registry: - logger.info( - f"finished building context images, releasing them now via daily builds process for" - f" image: {image_name} and version: {version}!" 
) - if is_run_in_parallel: - time.sleep(random.uniform(0, 5)) - build_image_daily(image_name, version, version)(config) - - except Exception as e: - logger.error(f"Error during build_image_generic for image {image_name}: {e}") - logger.error(f"Full traceback for build_image_generic error:") - for line in traceback.format_exception(type(e), e, e.__traceback__): - logger.error(line.rstrip()) - raise - - -def sign_and_verify_context_image(registry, version): - sign_image(registry, version + "-context") - verify_signature(registry, version + "-context") - - -def build_init_appdb(build_configuration: BuildConfiguration): - release = get_release() - version = release["initAppDbVersion"] - base_url = "https://fastdl.mongodb.org/tools/db/" - mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) - args = {"version": version, "is_appdb": True, "mongodb_tools_url_ubi": mongodb_tools_url_ubi} - build_image_generic(build_configuration, "init-appdb", "inventories/init_appdb.yaml", args) - - -def build_community_image(build_configuration: BuildConfiguration, image_type: str): - """ - Builds the image for community components (readiness probe, upgrade hook). - - Args: - build_configuration: The build configuration to use - image_type: Type of image to build ("readiness-probe" or "upgrade-hook") - """ - - if image_type == "readiness-probe": - image_name = "mongodb-kubernetes-readinessprobe" - inventory_file = "inventories/readiness_probe.yaml" - elif image_type == "upgrade-hook": - image_name = "mongodb-kubernetes-operator-version-upgrade-post-start-hook" - inventory_file = "inventories/upgrade_hook.yaml" - else: - raise ValueError(f"Unsupported image type: {image_type}") - - version = get_git_release_tag() - golang_version = os.getenv("GOLANG_VERSION", "1.24") - - # Use only amd64 if we should skip arm64 builds - if should_skip_arm64(build_configuration): - architectures = ["amd64"] - logger.info("Skipping ARM64 builds for community image as this is running in EVG pipeline as a patch") - else: - architectures = build_configuration.architecture or ["amd64", "arm64"] - - multi_arch_args_list = [] - - for arch in architectures: - arch_args = { - "version": version, - "golang_version": golang_version, - "architecture": arch, - } - multi_arch_args_list.append(arch_args) - - build_image_generic( - config=build_configuration, - image_name=image_name, - with_image_base=False, - multi_arch_args_list=multi_arch_args_list, - inventory_file=inventory_file, - is_multi_arch=True, # We push the manifest anyway, even if arm64 is skipped in patches - ) - - -def build_readiness_probe_image(build_configuration: BuildConfiguration): - """ - Builds image used for readiness probe. - """ - build_community_image(build_configuration, "readiness-probe") - - -def build_upgrade_hook_image(build_configuration: BuildConfiguration): - """ - Builds image used for version upgrade post-start hook. - """ - build_community_image(build_configuration, "upgrade-hook") - - -def build_multi_arch_agent_in_sonar( - build_configuration: BuildConfiguration, - image_version, - tools_version, -): - """ - Creates the multi-arch, non-operator-suffixed version of the agent. - This is a drop-in replacement for the agent - release from MCO. - This should only be called during releases, - which will lead to a release of the multi-arch - images to quay and ecr. 
- """ - - logger.info(f"building multi-arch base image for: {image_version}") - args = { - "version": image_version, - "tools_version": tools_version, - } - - arch_arm = { - "agent_distro": "amzn2_aarch64", - "tools_distro": get_tools_distro(tools_version=tools_version)["arm"], - "architecture": "arm64", - } - arch_amd = { - "agent_distro": "rhel9_x86_64", - "tools_distro": get_tools_distro(tools_version=tools_version)["amd"], - "architecture": "amd64", - } - - new_rhel_tool_version = "100.10.0" - if Version(tools_version) >= Version(new_rhel_tool_version): - arch_arm["tools_distro"] = "rhel93-aarch64" - arch_amd["tools_distro"] = "rhel93-x86_64" - - joined_args = [args | arch_amd] - - # Only include arm64 if we shouldn't skip it - if not should_skip_arm64(build_configuration): - joined_args.append(args | arch_arm) - - build_image_generic( - config=build_configuration, - image_name="mongodb-agent", - inventory_file="inventories/agent.yaml", - multi_arch_args_list=joined_args, - with_image_base=False, - is_multi_arch=True, # We for pushing manifest anyway, even if arm64 is skipped in patches - is_run_in_parallel=True, - ) - - -def build_agent(build_configuration: BuildConfiguration): - agent_versions_to_build = detect_ops_manager_changes() - if not agent_versions_to_build: - logger.info("No changes detected, skipping agent build") - return - - logger.info(f"Building Agent versions: {agent_versions_to_build}") - - tasks_queue = Queue() - max_workers = 1 - if build_configuration.parallel: - max_workers = None - if build_configuration.parallel_factor > 0: - max_workers = build_configuration.parallel_factor - with ProcessPoolExecutor(max_workers=max_workers) as executor: - logger.info(f"Running with factor of {max_workers}") - for idx, agent_tools_version in enumerate(agent_versions_to_build): - # We don't need to keep create and push the same image on every build. - # It is enough to create and push the non-operator suffixed images only during releases to ecr and quay. - logger.info(f"======= Building Agent {agent_tools_version} ({idx}/{len(agent_versions_to_build)})") - _build_agents( - agent_tools_version, - build_configuration, - executor, - tasks_queue, - ) - - queue_exception_handling(tasks_queue) - - -@TRACER.start_as_current_span("queue_exception_handling") -def queue_exception_handling(tasks_queue): - span = trace.get_current_span() - - exceptions_found = False - exception_count = 0 - total_tasks = len(tasks_queue.queue) - exception_types = set() - - span.set_attribute("mck.agent.queue.tasks_total", total_tasks) - - for task in tasks_queue.queue: - if task.exception() is not None: - exceptions_found = True - exception_count += 1 - exception_types.add(type(task.exception()).__name__) - - exception_info = task.exception() - logger.fatal(f"=== THREAD EXCEPTION DETAILS ===") - logger.fatal(f"Exception Type: {type(exception_info).__name__}") - logger.fatal(f"Exception Message: {str(exception_info)}") - logger.fatal(f"=== END THREAD EXCEPTION DETAILS ===") - - span.set_attribute("mck.agent.queue.exceptions_count", exception_count) - span.set_attribute( - "mck.agent.queue.success_rate", ((total_tasks - exception_count) / total_tasks * 100) if total_tasks > 0 else 0 - ) - span.set_attribute("mck.agent.queue.exception_types", list(exception_types)) - span.set_attribute("mck.agent.queue.has_exceptions", exceptions_found) - - if exceptions_found: - raise Exception( - f"Exception(s) found when processing Agent images. 
\nSee also previous logs for more info\nFailing the build" - ) - - -def _build_agents( - agent_tools_version: Tuple[str, str], - build_configuration: BuildConfiguration, - executor: ProcessPoolExecutor, - tasks_queue: Queue, -): - agent_version = agent_tools_version[0] - tools_version = agent_tools_version[1] - - tasks_queue.put( - executor.submit( - build_multi_arch_agent_in_sonar, - build_configuration, - agent_version, - tools_version, - ) - ) - - -def get_builder_function_for_image_name() -> Dict[str, Callable]: - """Returns a dictionary of image names that can be built.""" - - image_builders = { - "cli": build_CLI_SBOM, - "test": build_tests_image, - "operator": build_operator_image, - "mco-test": build_mco_tests_image, - # TODO: add support to build this per patch - "readiness-probe": build_readiness_probe_image, - "upgrade-hook": build_upgrade_hook_image, - "operator-quick": build_operator_image_patch, - "database": build_database_image, - "agent-pct": build_agent, - "agent": build_agent, - # - # Init images - "init-appdb": build_init_appdb, - "init-database": build_init_database, - "init-ops-manager": build_init_om_image, - # - # Daily builds - "operator-daily": build_image_daily("mongodb-kubernetes"), - "appdb-daily": build_image_daily("appdb"), - "database-daily": build_image_daily("database"), - "init-appdb-daily": build_image_daily("init-appdb"), - "init-database-daily": build_image_daily("init-database"), - "init-ops-manager-daily": build_image_daily("init-ops-manager"), - "ops-manager-6-daily": build_image_daily("ops-manager", min_version="6.0.0", max_version="7.0.0"), - "ops-manager-7-daily": build_image_daily("ops-manager", min_version="7.0.0", max_version="8.0.0"), - "ops-manager-8-daily": build_image_daily("ops-manager", min_version="8.0.0", max_version="9.0.0"), - # - # Ops Manager image - "ops-manager": build_om_image, - # This only builds the agents without the operator suffix - "mongodb-agent-daily": build_image_daily("mongodb-agent", operator_version="onlyAgents"), - # Community images - "readinessprobe-daily": build_image_daily( - "readinessprobe", - ), - "operator-version-upgrade-post-start-hook-daily": build_image_daily( - "operator-version-upgrade-post-start-hook", - ), - "mongodb-kubernetes-operator-daily": build_image_daily("mongodb-kubernetes-operator"), - } - - return image_builders - - -# TODO: nam static: remove this once static containers becomes the default -def build_init_database(build_configuration: BuildConfiguration): - release = get_release() - version = release["initDatabaseVersion"] # comes from release.json - base_url = "https://fastdl.mongodb.org/tools/db/" - mongodb_tools_url_ubi = "{}{}".format(base_url, release["mongodbToolsBundle"]["ubi"]) - args = {"version": version, "mongodb_tools_url_ubi": mongodb_tools_url_ubi, "is_appdb": False} - build_image_generic(build_configuration, "init-database", "inventories/init_database.yaml", args) - - -def build_image(image_name: str, build_configuration: BuildConfiguration): - """Builds one of the supported images by its name.""" - get_builder_function_for_image_name()[image_name](build_configuration) - - -def build_all_images( - images: Iterable[str], - builder: str, - debug: bool = False, - parallel: bool = False, - architecture: Optional[List[str]] = None, - sign: bool = False, - all_agents: bool = False, - agent_to_build: str = "", - parallel_factor: int = 0, -): - """Builds all the images in the `images` list.""" - build_configuration = operator_build_configuration( - builder, parallel, debug, 
architecture, sign, all_agents, parallel_factor, agent_to_build - ) - if sign: - mongodb_artifactory_login() - for image in images: - build_image(image, build_configuration) - - -def calculate_images_to_build( - images: List[str], include: Optional[List[str]], exclude: Optional[List[str]] -) -> Set[str]: - """ - Calculates which images to build based on the `images`, `include` and `exclude` sets. - - >>> calculate_images_to_build(["a", "b"], ["a"], ["b"]) - ... ["a"] - """ - - if not include and not exclude: - return set(images) - include = set(include or []) - exclude = set(exclude or []) - images = set(images or []) - - for image in include.union(exclude): - if image not in images: - raise ValueError("Image definition {} not found".format(image)) - - images_to_build = include.intersection(images) - if exclude: - images_to_build = images.difference(exclude) - return images_to_build - - -def main(): - _setup_tracing() - - parser = argparse.ArgumentParser() - parser.add_argument("--include", action="append", help="list of images to include") - parser.add_argument("--exclude", action="append", help="list of images to exclude") - parser.add_argument("--builder", default="docker", type=str, help="docker or podman") - parser.add_argument("--list-images", action="store_true") - parser.add_argument("--parallel", action="store_true", default=False) - parser.add_argument("--debug", action="store_true", default=False) - parser.add_argument( - "--arch", - choices=["amd64", "arm64"], - nargs="+", - help="for daily builds only, specify the list of architectures to build for images", - ) - parser.add_argument("--sign", action="store_true", default=False) - parser.add_argument( - "--parallel-factor", - type=int, - default=0, - help="the factor on how many agents are build in parallel. 
0 means all CPUs will be used", - ) - parser.add_argument( - "--all-agents", - action="store_true", - default=False, - help="optional parameter to be able to push " - "all non operator suffixed agents, even if we are not in a release", - ) - parser.add_argument( - "--build-one-agent", - default="", - help="optional parameter to push one agent", - ) - args = parser.parse_args() - - if args.list_images: - print(get_builder_function_for_image_name().keys()) - sys.exit(0) - - if args.arch == ["arm64"]: - print("Building for arm64 only is not supported yet") - sys.exit(1) - - if not args.sign: - logger.warning("--sign flag not provided, images won't be signed") - - images_to_build = calculate_images_to_build( - list(get_builder_function_for_image_name().keys()), args.include, args.exclude - ) - - build_all_images( - images_to_build, - args.builder, - debug=args.debug, - parallel=args.parallel, - architecture=args.arch, - sign=args.sign, - all_agents=args.all_agents, - agent_to_build=args.build_one_agent, - parallel_factor=args.parallel_factor, - ) - - -if __name__ == "__main__": - main() diff --git a/pipeline_test.py b/pipeline_test.py deleted file mode 100644 index 61bbed197..000000000 --- a/pipeline_test.py +++ /dev/null @@ -1,342 +0,0 @@ -import os -import subprocess -import unittest -from unittest.mock import patch - -import pytest - -from pipeline import ( - calculate_images_to_build, - get_versions_to_rebuild, - get_versions_to_rebuild_per_operator_version, - is_version_in_range, - operator_build_configuration, -) -from scripts.release.build.image_signing import run_command_with_retries - -release_json = { - "supportedImages": { - "mongodb-agent": { - "opsManagerMapping": { - "cloud_manager": "13.19.0.8937-1", - "cloud_manager_tools": "100.9.4", - "ops_manager": { - "6.0.0": {"agent_version": "12.0.30.7791-1", "tools_version": "100.9.4"}, - "6.0.21": {"agent_version": "12.0.29.7785-1", "tools_version": "100.9.4"}, - "6.0.22": {"agent_version": "12.0.30.7791-1", "tools_version": "100.9.4"}, - "7.0.0": {"agent_version": "107.0.1.8507-1", "tools_version": "100.9.4"}, - "7.0.1": {"agent_version": "107.0.1.8507-1", "tools_version": "100.9.4"}, - "7.0.2": {"agent_version": "107.0.2.8531-1", "tools_version": "100.9.4"}, - "7.0.3": {"agent_version": "107.0.3.8550-1", "tools_version": "100.9.4"}, - "6.0.23": {"agent_version": "12.0.31.7825-1", "tools_version": "100.9.4"}, - "7.0.4": {"agent_version": "107.0.4.8567-1", "tools_version": "100.9.4"}, - "7.0.6": {"agent_version": "107.0.6.8587-1", "tools_version": "100.9.4"}, - "7.0.7": {"agent_version": "107.0.7.8596-1", "tools_version": "100.9.4"}, - "7.0.10": {"agent_version": "107.0.10.8627-1", "tools_version": "100.9.5"}, - "7.0.11": {"agent_version": "107.0.11.8645-1", "tools_version": "100.10.0"}, - }, - } - } - } -} - - -def test_operator_build_configuration(): - with patch.dict(os.environ, {"distro": "a_distro", "BASE_REPO_URL": "somerepo/url", "namespace": "something"}): - config = operator_build_configuration("builder", True, False) - assert config.image_type == "a_distro" - assert config.base_repository == "somerepo/url" - assert config.namespace == "something" - - -def test_operator_build_configuration_defaults(): - with patch.dict( - os.environ, - { - "BASE_REPO_URL": "", - }, - ): - config = operator_build_configuration("builder", True, False) - assert config.image_type == "ubi" - assert config.base_repository == "" - assert config.namespace == "default" - - -@pytest.mark.parametrize( - "test_case", - [ - (["a", "b", "c"], ["a"], ["b"], 
{"a", "c"}), - (["a", "b", "c"], ["a", "b"], None, {"a", "b"}), - (["a", "b", "c"], None, ["a"], {"b", "c"}), - (["a", "b", "c"], [], [], {"a", "b", "c"}), - (["a", "b", "c"], ["d"], None, ValueError), - (["a", "b", "c"], None, ["d"], ValueError), - ([], ["a"], ["b"], ValueError), - (["a", "b", "c"], None, None, {"a", "b", "c"}), - # Given an include, it should only return include images - (["cli", "ops-manager", "appdb-daily", "init-appdb"], ["cli"], [], {"cli"}), - # Given no include nor excludes it should return all images - ( - ["cli", "ops-manager", "appdb-daily", "init-appdb"], - [], - [], - {"init-appdb", "appdb-daily", "ops-manager", "cli"}, - ), - # Given an exclude, it should return all images except the excluded ones - ( - ["cli", "ops-manager", "appdb-daily", "init-appdb"], - [], - ["init-appdb", "appdb-daily"], - {"ops-manager", "cli"}, - ), - # Given an include and a different exclude, it should return all images except the exclusions - ( - ["cli", "ops-manager", "appdb-daily", "init-appdb"], - ["appdb-daily"], - ["init-appdb"], - {"appdb-daily", "cli", "ops-manager"}, - ), - # Given multiple includes and a different exclude, it should return all images except the exclusions - ( - ["cli", "ops-manager", "appdb-daily", "init-appdb"], - ["cli", "appdb-daily"], - ["init-appdb"], - {"appdb-daily", "cli", "ops-manager"}, - ), - ], -) -def test_calculate_images_to_build(test_case): - images, include, exclude, expected = test_case - if expected is ValueError: - with pytest.raises(ValueError): - calculate_images_to_build(images, include, exclude) - else: - assert calculate_images_to_build(images, include, exclude) == expected - - -@pytest.mark.parametrize( - "version,min_version,max_version,expected", - [ - # When one bound is empty or None, always return True - ("7.0.0", "8.0.0", "", True), - ("7.0.0", "8.0.0", None, True), - ("9.0.0", "", "8.0.0", True), - # Upper bound is excluded - ("8.1.1", "8.0.0", "8.1.1", False), - # Lower bound is included - ("8.0.0", "8.0.0", "8.1.1", True), - # Test some values - ("8.5.2", "8.5.1", "8.5.3", True), - ("8.5.2", "8.5.3", "8.4.2", False), - ], -) -def test_is_version_in_range(version, min_version, max_version, expected): - assert is_version_in_range(version, min_version, max_version) == expected - - -@pytest.mark.parametrize( - "description,case", - [ - ("No skip or include tags", {"skip_tags": [], "include_tags": [], "expected": True}), - ("Include 'release' only", {"skip_tags": [], "include_tags": ["release"], "expected": True}), - ("Skip 'release' only", {"skip_tags": ["release"], "include_tags": [], "expected": False}), - ("Include non-release, no skip", {"skip_tags": [], "include_tags": ["test", "deploy"], "expected": False}), - ("Skip non-release, no include", {"skip_tags": ["test", "deploy"], "include_tags": [], "expected": True}), - ("Include and skip 'release'", {"skip_tags": ["release"], "include_tags": ["release"], "expected": False}), - ( - "Skip non-release, include 'release'", - {"skip_tags": ["test", "deploy"], "include_tags": ["release"], "expected": True}, - ), - ], -) -def test_is_release_step_executed(description, case): - config = operator_build_configuration("builder", True, False) - config.skip_tags = case["skip_tags"] - config.include_tags = case["include_tags"] - result = config.is_release_step_executed() - assert result == case["expected"], f"Test failed: {description}. Expected {case['expected']}, got {result}." 
-
-
-def test_get_versions_to_rebuild_multiple_versions():
-    supported_versions = ["6.0.0", "6.0.1", "6.0.21", "6.11.0", "7.0.0"]
-    expected_agents = ["6.0.0", "6.0.1", "6.0.21"]
-    agents = get_versions_to_rebuild(supported_versions, "6.0.0", "6.10.0")
-    actual_agents = []
-    for a in agents:
-        actual_agents.append(a)
-    assert actual_agents == expected_agents
-
-
-def test_get_versions_to_rebuild_multiple_versions_per_operator():
-    supported_versions = ["107.0.1.8507-1_1.27.0", "107.0.1.8507-1_1.28.0", "100.0.1.8507-1_1.28.0"]
-    expected_agents = ["107.0.1.8507-1_1.28.0", "100.0.1.8507-1_1.28.0"]
-    agents = get_versions_to_rebuild_per_operator_version(supported_versions, "1.28.0")
-    assert agents == expected_agents
-
-
-def test_get_versions_to_rebuild_multiple_versions_per_operator_only_non_suffixed():
-    supported_versions = ["107.0.1.8507-1_1.27.0", "107.0.10.8627-1-arm64", "100.0.10.8627-1"]
-    expected_agents = ["107.0.10.8627-1-arm64", "100.0.10.8627-1"]
-    agents = get_versions_to_rebuild_per_operator_version(supported_versions, "onlyAgents")
-    assert agents == expected_agents
-
-
-command = ["echo", "Hello World"]
-
-
-class TestRunCommandWithRetries(unittest.TestCase):
-    @patch("subprocess.run")
-    @patch("time.sleep", return_value=None)  # to avoid actual sleeping during tests
-    def test_successful_command(self, mock_sleep, mock_run):
-        # Mock a successful command execution
-        mock_run.return_value = subprocess.CompletedProcess(command, 0, stdout="Hello World", stderr="")
-
-        result = run_command_with_retries(command)
-        self.assertEqual(result.stdout, "Hello World")
-        mock_run.assert_called_once()
-        mock_sleep.assert_not_called()
-
-    @patch("subprocess.run")
-    @patch("time.sleep", return_value=None)  # to avoid actual sleeping during tests
-    def test_retryable_error(self, mock_sleep, mock_run):
-        # Mock a retryable error first, then a successful command execution
-        mock_run.side_effect = [
-            subprocess.CalledProcessError(500, command, stderr="500 Internal Server Error"),
-            subprocess.CompletedProcess(command, 0, stdout="Hello World", stderr=""),
-        ]
-
-        result = run_command_with_retries(command)
-        self.assertEqual(result.stdout, "Hello World")
-        self.assertEqual(mock_run.call_count, 2)
-        mock_sleep.assert_called_once()
-
-    @patch("subprocess.run")
-    @patch("time.sleep", return_value=None)  # to avoid actual sleeping during tests
-    def test_non_retryable_error(self, mock_sleep, mock_run):
-        # Mock a non-retryable error
-        mock_run.side_effect = subprocess.CalledProcessError(1, command, stderr="1 Some Error")
-
-        with self.assertRaises(subprocess.CalledProcessError):
-            run_command_with_retries(command)
-
-        self.assertEqual(mock_run.call_count, 1)
-        mock_sleep.assert_not_called()
-
-    @patch("subprocess.run")
-    @patch("time.sleep", return_value=None)  # to avoid actual sleeping during tests
-    def test_all_retries_fail(self, mock_sleep, mock_run):
-        # Mock all attempts to fail with a retryable error
-        mock_run.side_effect = subprocess.CalledProcessError(500, command, stderr="500 Internal Server Error")
-
-        with self.assertRaises(subprocess.CalledProcessError):
-            run_command_with_retries(command, retries=3)
-
-        self.assertEqual(mock_run.call_count, 3)
-        self.assertEqual(mock_sleep.call_count, 2)
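
The four cases above encode the retry contract: return on success, retry with a sleep only for errors that look transient (the tests model these as an HTTP-style 500 in stderr), fail fast on anything else, and re-raise once the attempt budget is spent. A sketch consistent with the observed call and sleep counts; the transient-error heuristic is an assumption, and the real run_command_with_retries lives in scripts/release/build/image_signing:

import subprocess
import time

def run_command_with_retries_sketch(cmd, retries=3, delay=5):
    # Three attempts and two sleeps for retries=3, matching test_all_retries_fail.
    for attempt in range(retries):
        try:
            return subprocess.run(cmd, check=True, capture_output=True, text=True)
        except subprocess.CalledProcessError as exc:
            if "500" not in (exc.stderr or ""):
                raise  # non-retryable: surface immediately, no sleep
            if attempt == retries - 1:
                raise  # retries exhausted
            time.sleep(delay)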
-
-
-@patch("pipeline.shutil.which", return_value="/mock/path/to/docker")
-@patch("subprocess.run")
-def test_create_and_push_manifest_success(mock_run, mock_which):
-    """Test successful creation and pushing of manifest with multiple architectures."""
-    # Setup mock to return success for both calls
-    mock_run.return_value = subprocess.CompletedProcess(args=[], returncode=0, stdout=b"", stderr=b"")
-
-    image = "test/image"
-    tag = "1.0.0"
-    architectures = ["amd64", "arm64"]
-
-    from pipeline import create_and_push_manifest
-
-    create_and_push_manifest(image, tag, architectures)
-
-    assert mock_run.call_count == 2
-
-    # Verify first call - create manifest
-    create_call_args = mock_run.call_args_list[0][0][0]
-    assert create_call_args == [
-        "/mock/path/to/docker",
-        "manifest",
-        "create",
-        "test/image:1.0.0",
-        "--amend",
-        "test/image:1.0.0-amd64",
-        "--amend",
-        "test/image:1.0.0-arm64",
-    ]
-
-    # Verify second call - push manifest
-    push_call_args = mock_run.call_args_list[1][0][0]
-    assert push_call_args == ["/mock/path/to/docker", "manifest", "push", f"{image}:{tag}"]
-
-
-@patch("pipeline.shutil.which", return_value="/mock/path/to/docker")
-@patch("subprocess.run")
-def test_create_and_push_manifest_single_arch(mock_run, mock_which):
-    """Test manifest creation with a single architecture."""
-    # Setup mock to return success for both calls
-    mock_run.return_value = subprocess.CompletedProcess(args=[], returncode=0, stdout=b"", stderr=b"")
-
-    image = "test/image"
-    tag = "1.0.0"
-    architectures = ["amd64"]
-
-    from pipeline import create_and_push_manifest
-
-    create_and_push_manifest(image, tag, architectures)
-
-    # Verify first call - create manifest (should only include one architecture)
-    create_call_args = mock_run.call_args_list[0][0][0]
-    assert (
-        " ".join(create_call_args)
-        == "/mock/path/to/docker manifest create test/image:1.0.0 --amend test/image:1.0.0-amd64"
-    )
-
-
-@patch("pipeline.shutil.which", return_value="/mock/path/to/docker")
-@patch("subprocess.run")
-def test_create_and_push_manifest_create_error(mock_run, mock_which):
-    """Test error handling when manifest creation fails."""
-    # Setup mock to return error for create call
-    mock_run.return_value = subprocess.CompletedProcess(
-        args=[], returncode=1, stdout=b"", stderr=b"Error creating manifest"
-    )
-
-    image = "test/image"
-    tag = "1.0.0"
-    architectures = ["amd64", "arm64"]
-
-    from pipeline import create_and_push_manifest
-
-    # Verify exception is raised with the stderr content
-    with pytest.raises(Exception) as exc_info:
-        create_and_push_manifest(image, tag, architectures)
-
-    assert "Error creating manifest" in str(exc_info.value)
-    assert mock_run.call_count == 1  # Only the create call, not the push call
-
-
-@patch("pipeline.shutil.which", return_value="/mock/path/to/docker")
-@patch("subprocess.run")
-def test_create_and_push_manifest_push_error(mock_run, mock_which):
-    """Test error handling when manifest push fails."""
-    # Setup mock to return success for create but error for push
-    mock_run.side_effect = [
-        subprocess.CompletedProcess(args=[], returncode=0, stdout=b"", stderr=b""),  # create success
-        subprocess.CompletedProcess(args=[], returncode=1, stdout=b"", stderr=b"Push failed"),  # push fails
-    ]
-
-    # Call function with test parameters
-    image = "test/image"
-    tag = "1.0.0"
-    architectures = ["amd64", "arm64"]
-
-    from pipeline import create_and_push_manifest
-
-    # Verify exception is raised with the stderr content
-    with pytest.raises(Exception) as exc_info:
-        create_and_push_manifest(image, tag, architectures)
-
-    # The function raises the stderr directly, so we should check for the exact error message
-    assert "Push failed" in str(exc_info.value)
-    assert mock_run.call_count == 2  # Both create and push calls
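
That concludes the deleted pipeline_test.py. For reference, the behaviour these last tests pin down for the also-deleted create_and_push_manifest, as a sketch: one docker manifest create with an --amend per architecture-suffixed tag, then a docker manifest push, raising the captured stderr on the first non-zero exit:

import shutil
import subprocess

def create_and_push_manifest_sketch(image, tag, architectures):
    # Stitch per-arch tags (image:tag-arch) into one multi-arch manifest, then push it.
    docker = shutil.which("docker")
    create_cmd = [docker, "manifest", "create", f"{image}:{tag}"]
    for arch in architectures:
        create_cmd += ["--amend", f"{image}:{tag}-{arch}"]
    for cmd in (create_cmd, [docker, "manifest", "push", f"{image}:{tag}"]):
        result = subprocess.run(cmd, capture_output=True)
        if result.returncode != 0:
            raise Exception(result.stderr)  # surface docker's stderr to the caller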
diff --git a/scripts/dev/contexts/e2e_mdb_kind_ubi_cloudqa b/scripts/dev/contexts/e2e_mdb_kind_ubi_cloudqa
index 73202c211..e1f13453c 100644
--- a/scripts/dev/contexts/e2e_mdb_kind_ubi_cloudqa
+++ b/scripts/dev/contexts/e2e_mdb_kind_ubi_cloudqa
@@ -10,7 +10,7 @@ source "${script_dir}/root-context"
 export ops_manager_version="cloud_qa"
 
 # This is required to be able to rebuild the om image and use that image which has been rebuilt
-export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev
+export OPS_MANAGER_REGISTRY="${BASE_REPO_URL}"
 
 CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${script_dir}"/../../../.evergreen.yml | awk '{print $3}')
 export CUSTOM_OM_VERSION
diff --git a/scripts/dev/contexts/e2e_static_mdb_kind_ubi_cloudqa b/scripts/dev/contexts/e2e_static_mdb_kind_ubi_cloudqa
index 0ee88f209..c007157d7 100644
--- a/scripts/dev/contexts/e2e_static_mdb_kind_ubi_cloudqa
+++ b/scripts/dev/contexts/e2e_static_mdb_kind_ubi_cloudqa
@@ -11,7 +11,7 @@ export ops_manager_version="cloud_qa"
 export MDB_DEFAULT_ARCHITECTURE=static
 
 # This is required to be able to rebuild the om image and use that image which has been rebuilt
-export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev
+export OPS_MANAGER_REGISTRY="${BASE_REPO_URL}"
 
 CUSTOM_OM_VERSION=$(grep -E "^\s*-\s*&ops_manager_70_latest\s+(\S+)\s+#" <"${script_dir}"/../../../.evergreen.yml | awk '{print $3}')
 export CUSTOM_OM_VERSION
diff --git a/scripts/dev/contexts/evg-private-context b/scripts/dev/contexts/evg-private-context
index c8bab1176..dec163d8c 100644
--- a/scripts/dev/contexts/evg-private-context
+++ b/scripts/dev/contexts/evg-private-context
@@ -107,8 +107,8 @@ export CODE_SNIPPETS_COMMIT_OUTPUT=${code_snippets_commit_output:-"false"}
 
 # MCO
 # shellcheck disable=SC2154
-export READINESS_PROBE_IMAGE="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-readinessprobe:${version_id}"
-export VERSION_UPGRADE_HOOK_IMAGE="268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-operator-version-upgrade-post-start-hook:${version_id}"
+export READINESS_PROBE_IMAGE="${BASE_REPO_URL}/mongodb-kubernetes-readinessprobe:${version_id}"
+export VERSION_UPGRADE_HOOK_IMAGE="${BASE_REPO_URL}/mongodb-kubernetes-operator-version-upgrade-post-start-hook:${version_id}"
 
 # TODO to be removed at public preview stage of community-search
 export COMMUNITY_PRIVATE_PREVIEW_PULLSECRET_DOCKERCONFIGJSON="${community_private_preview_pullsecret_dockerconfigjson}"
diff --git a/scripts/dev/contexts/root-context b/scripts/dev/contexts/root-context
index 888039232..3b9f86cbd 100644
--- a/scripts/dev/contexts/root-context
+++ b/scripts/dev/contexts/root-context
@@ -131,5 +131,5 @@ fi
 export OPERATOR_NAME="mongodb-kubernetes-operator"
 
 # Variables used for release process
-export INITIAL_COMMIT_SHA="9ed5f98fc70c5b3442f633d2393265fb8a2aba0c"
-export INITIAL_VERSION="1.3.0"
+export RELEASE_INITIAL_COMMIT_SHA="9ed5f98fc70c5b3442f633d2393265fb8a2aba0c"
+export RELEASE_INITIAL_VERSION="1.3.0"
diff --git a/scripts/dev/contexts/variables/om60 b/scripts/dev/contexts/variables/om60
index a9df4cdcc..dac7e8375 100644
--- a/scripts/dev/contexts/variables/om60
+++ b/scripts/dev/contexts/variables/om60
@@ -15,5 +15,5 @@ export CUSTOM_MDB_PREV_VERSION=5.0.7
 export CUSTOM_APPDB_VERSION=6.0.21-ent
 export TEST_MODE=opsmanager
 
-export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev
-export APPDB_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev
+export OPS_MANAGER_REGISTRY="${BASE_REPO_URL}"
+export APPDB_REGISTRY="${BASE_REPO_URL}"
diff --git a/scripts/dev/contexts/variables/om70 b/scripts/dev/contexts/variables/om70
index c4e53174f..09e636593 100644
--- a/scripts/dev/contexts/variables/om70
+++ b/scripts/dev/contexts/variables/om70
@@ -18,5 +18,5 @@ export AGENT_IMAGE="${MDB_AGENT_IMAGE_REPOSITORY}:${AGENT_VERSION}"
 export CUSTOM_APPDB_VERSION=7.0.18-ent
 export TEST_MODE=opsmanager
 
-export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev
-export APPDB_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev
+export OPS_MANAGER_REGISTRY="${BASE_REPO_URL}"
+export APPDB_REGISTRY="${BASE_REPO_URL}"
diff --git a/scripts/dev/contexts/variables/om80 b/scripts/dev/contexts/variables/om80
index 200c9b690..4d84cb943 100644
--- a/scripts/dev/contexts/variables/om80
+++ b/scripts/dev/contexts/variables/om80
@@ -18,5 +18,5 @@ export AGENT_IMAGE="${MDB_AGENT_IMAGE_REPOSITORY}:${AGENT_VERSION}"
 export CUSTOM_APPDB_VERSION=8.0.6-ent
 export TEST_MODE=opsmanager
 
-export OPS_MANAGER_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev
-export APPDB_REGISTRY=268558157000.dkr.ecr.us-east-1.amazonaws.com/dev
+export OPS_MANAGER_REGISTRY="${BASE_REPO_URL}"
+export APPDB_REGISTRY="${BASE_REPO_URL}"
diff --git a/scripts/release/atomic_pipeline.py b/scripts/release/atomic_pipeline.py
index e165e940d..679a20af7 100755
--- a/scripts/release/atomic_pipeline.py
+++ b/scripts/release/atomic_pipeline.py
@@ -2,6 +2,7 @@
 """This atomic_pipeline script knows about the details of our Docker images
 and where to fetch and calculate parameters."""
 
+import datetime
 import json
 import os
 import shutil
@@ -62,7 +63,7 @@ def build_image(
     span = trace.get_current_span()
     span.set_attribute("mck.image_name", image_name)
 
-    registries = build_configuration.get_registries
+    registries = build_configuration.get_registries()
 
     build_args = build_args or {}
@@ -72,14 +73,21 @@ def build_image(
     span.set_attribute("mck.platforms", build_configuration.platforms)
 
     # Build the image once with all repository tags
-    all_tags = [f"{registry}:{build_configuration.version}" for registry in build_configuration.registries]
+    tags = []
+    for registry in registries:
+        tags.append(f"{registry}:{build_configuration.version}")
+        if build_configuration.latest_tag:
+            tags.append(f"{registry}:latest")
+        if build_configuration.olm_tag:
+            olm_tag = create_olm_version_tag(build_configuration.version)
+            tags.append(f"{registry}:{olm_tag}")
 
     logger.info(
-        f"Building image with tags {all_tags} for platforms={build_configuration.platforms}, dockerfile args: {build_args}"
+        f"Building image with tags {tags} for platforms={build_configuration.platforms}, dockerfile args: {build_args}"
     )
 
     execute_docker_build(
-        tags=all_tags,
+        tags=tags,
         dockerfile=build_configuration.dockerfile_path,
         path=build_path,
         args=build_args,
@@ -91,7 +99,7 @@ def build_image(
         logger.info("Logging in MongoDB Artifactory for Garasign image")
         mongodb_artifactory_login()
         logger.info("Signing image")
-        for registry in build_configuration.registries:
+        for registry in registries:
            sign_image(registry, build_configuration.version)
            verify_signature(registry, build_configuration.version)
 
@@ -430,3 +438,9 @@ def queue_exception_handling(tasks_queue):
 def load_release_file() -> Dict:
     with open("release.json") as release:
         return json.load(release)
+
+
+def create_olm_version_tag(version: str) -> str:
+    now = datetime.datetime.now()
+    timestamp_suffix = now.strftime("%Y%m%d%H%M%S")
+    return f"{version}-olm-{timestamp_suffix}"
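
The reworked tag loop means a single build can now push up to three tags per registry. For a hypothetical release build with version 1.3.0, latest_tag=True and olm_tag=True, the result would look like this (registry and timestamp are illustrative):

import datetime

version, registry = "1.3.0", "quay.io/mongodb/mongodb-kubernetes"  # hypothetical values
olm_tag = f"{version}-olm-{datetime.datetime.now():%Y%m%d%H%M%S}"
tags = [f"{registry}:{version}", f"{registry}:latest", f"{registry}:{olm_tag}"]
# e.g. ["...:1.3.0", "...:latest", "...:1.3.0-olm-20250102150405"]; the timestamped
# OLM tag is never reused, which is what lets it be treated as immutable

This replaces the TODO removed from image_build_process.py below, where the OLM immutable tag was previously left unimplemented.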
diff --git a/scripts/release/build/build_info.py b/scripts/release/build/build_info.py
index d5c1a86db..8ffd2868d 100644
--- a/scripts/release/build/build_info.py
+++ b/scripts/release/build/build_info.py
@@ -3,13 +3,8 @@
 from typing import Dict, List
 
 from scripts.release.build.build_scenario import BuildScenario
-from scripts.release.constants import (
-    DEFAULT_REPOSITORY_PATH,
-    DEFAULT_CHANGELOG_PATH,
-    RELEASE_INITIAL_VERSION_ENV_VAR,
-    get_initial_version,
-    get_initial_commit_sha,
-)
+from scripts.release.constants import DEFAULT_REPOSITORY_PATH, DEFAULT_CHANGELOG_PATH, RELEASE_INITIAL_VERSION_ENV_VAR, \
+    get_initial_version, get_initial_commit_sha
 
 MEKO_TESTS_IMAGE = "meko-tests"
 OPERATOR_IMAGE = "operator"
@@ -29,24 +24,26 @@ class ImageInfo:
     repositories: List[str]
     platforms: list[str]
-    version: str
+    version: str | None
     dockerfile_path: str
-    sign: bool
+    sign: bool = False
+    latest_tag: bool = False
+    olm_tag: bool = False
 
 
 @dataclass
 class BinaryInfo:
     s3_store: str
     platforms: list[str]
-    version: str
-    sign: bool
+    version: str | None
+    sign: bool = False
 
 
 @dataclass
 class HelmChartInfo:
-    repository: List[str]
-    version: str
-    sign: bool
+    repositories: List[str]
+    version: str | None
+    sign: bool = False
 
 
 @dataclass
@@ -61,7 +58,7 @@ def load_build_info(
    repository_path: str = DEFAULT_REPOSITORY_PATH,
    changelog_sub_path: str = DEFAULT_CHANGELOG_PATH,
    initial_commit_sha: str = None,
-    initial_version: str = None,
+    initial_version: str = None
 ) -> BuildInfo:
     f"""
     Load build information based on the specified scenario.
@@ -80,7 +77,6 @@
         initial_version = get_initial_version()
 
     version = scenario.get_version(repository_path, changelog_sub_path, initial_commit_sha, initial_version)
-    # For manual_release, version can be None and will be set by image-specific logic
 
     with open("build_info.json", "r") as f:
         build_info = json.load(f)
@@ -108,6 +104,8 @@
             version=image_version,
             dockerfile_path=data["dockerfile-path"],
             sign=scenario_data.get("sign", False),
+            latest_tag=scenario_data.get("latest-tag", False),
+            olm_tag=scenario_data.get("olm-tag", False),
         )
 
     binaries = {}
@@ -132,7 +130,7 @@
             continue
 
         helm_charts[name] = HelmChartInfo(
-            repository=scenario_data["repositories"],
+            repositories=scenario_data["repositories"],
             version=version,
             sign=scenario_data.get("sign", False),
         )
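
load_build_info reads these flags from build_info.json: per image, each scenario block may now carry latest-tag and olm-tag alongside the existing keys. A hypothetical fragment, shown as a Python literal in the style of the release_json dict above; the nesting and values are invented, only the key names come from the loader code:

{
    "operator": {
        "release": {
            "repositories": ["quay.io/mongodb/mongodb-kubernetes"],
            "platforms": ["linux/arm64", "linux/amd64"],
            "dockerfile-path": "docker/mongodb-kubernetes-operator/Dockerfile.atomic",
            "sign": True,
            "olm-tag": True,  # mapped to ImageInfo.olm_tag
        },
        "staging": {
            "repositories": ["268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes"],
            "platforms": ["linux/arm64", "linux/amd64"],
            "dockerfile-path": "docker/mongodb-kubernetes-operator/Dockerfile.atomic",
            "sign": True,
            "latest-tag": True,  # mapped to ImageInfo.latest_tag
        },
    }
}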
diff --git a/scripts/release/build/build_scenario.py b/scripts/release/build/build_scenario.py
index 2fb27e173..da9822533 100644
--- a/scripts/release/build/build_scenario.py
+++ b/scripts/release/build/build_scenario.py
@@ -3,7 +3,8 @@
 from git import Repo
 
 from lib.base_logger import logger
-from scripts.release.constants import triggered_by_git_tag, is_evg_patch, is_running_in_evg, get_version_id
+from scripts.release.constants import triggered_by_git_tag, is_evg_patch, is_running_in_evg, get_version_id, \
+    get_github_commit
 from scripts.release.version import calculate_next_version
 
 COMMIT_SHA_LENGTH = 8
@@ -23,18 +24,19 @@ def infer_scenario_from_environment(cls) -> "BuildScenario":
         is_patch = is_evg_patch()
         is_evg = is_running_in_evg()
         patch_id = get_version_id()
+        commit_sha = get_github_commit()
 
         if git_tag:
             # Release scenario and the git tag will be used for promotion process only
             scenario = BuildScenario.RELEASE
             logger.info(f"Build scenario: {scenario} (git_tag: {git_tag})")
-        elif is_patch or is_evg:
+        elif is_patch and is_evg:
             scenario = BuildScenario.PATCH
             logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})")
         # TODO: Uncomment the following lines when starting to work on staging builds
         # elif is_evg:
         #     scenario = BuildScenario.STAGING
-        #     logger.info(f"Build scenario: {scenario} (patch_id: {patch_id})")
+        #     logger.info(f"Build scenario: {scenario} (commit_sha: {commit_sha[:COMMIT_SHA_LENGTH]})")
         else:
             scenario = BuildScenario.DEVELOPMENT
             logger.info(f"Build scenario: {scenario}")
diff --git a/scripts/release/build/image_build_configuration.py b/scripts/release/build/image_build_configuration.py
index 537e15eba..b62718276 100644
--- a/scripts/release/build/image_build_configuration.py
+++ b/scripts/release/build/image_build_configuration.py
@@ -10,6 +10,8 @@ class ImageBuildConfiguration:
     scenario: BuildScenario
     version: str
+    latest_tag: bool
+    olm_tag: bool
 
     registries: List[str]
     dockerfile_path: str
diff --git a/scripts/release/build/image_build_process.py b/scripts/release/build/image_build_process.py
index 8bfecc6b2..02084a84a 100644
--- a/scripts/release/build/image_build_process.py
+++ b/scripts/release/build/image_build_process.py
@@ -116,7 +116,6 @@ def execute_docker_build(
     docker_cmd.buildx.build(
         context_path=path,
         file=dockerfile,
-        # TODO: add tag for release builds (OLM immutable tag)
         tags=tags,
         platforms=platforms,
         builder=builder_name,
diff --git a/scripts/release/constants.py b/scripts/release/constants.py
index 20ad747a5..dd0e462c2 100644
--- a/scripts/release/constants.py
+++ b/scripts/release/constants.py
@@ -51,3 +51,12 @@ def get_version_id() -> str | None:
     :return: version_id (patch ID) or None if not set
     """
     return os.getenv("version_id")
+
+
+def get_github_commit() -> str | None:
+    """
+    ${github_commit} is the commit hash of the commit that triggered the patch run.
+    For non pull-request patches, it will be the same as ${revision}.
+    :return: github_commit or None if not set
+    """
+    return os.getenv("github_commit")
diff --git a/scripts/release/pipeline_main.py b/scripts/release/pipeline_main.py
index aaea971bd..010620284 100644
--- a/scripts/release/pipeline_main.py
+++ b/scripts/release/pipeline_main.py
@@ -109,6 +109,8 @@ def image_build_config_from_args(args) -> ImageBuildConfiguration:
 
     # Resolve final values with overrides
     version = args.version or image_build_info.version
+    latest_tag = image_build_info.latest_tag
+    olm_tag = image_build_info.olm_tag
     if args.registry:
         registries = [args.registry]
     else:
@@ -125,6 +127,8 @@
     return ImageBuildConfiguration(
         scenario=build_scenario,
         version=version,
+        latest_tag=latest_tag,
+        olm_tag=olm_tag,
         registries=registries,
         dockerfile_path=dockerfile_path,
         parallel=args.parallel,
diff --git a/scripts/release/release_info.py b/scripts/release/release_info.py
index d36ee3545..487d5a66a 100644
--- a/scripts/release/release_info.py
+++ b/scripts/release/release_info.py
@@ -71,7 +71,7 @@ def convert_to_release_info_json(build_info: BuildInfo) -> dict:
 
     for name, chart in build_info.helm_charts.items():
         output["helm-charts"][name] = {
-            "repositories": chart.repository,
+            "repositories": chart.repositories,
             "version": chart.version,
         }
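
With the `or` tightened to `and` in build_scenario.py above, the scenario inference now reads as the following table (a summary of the branch logic, not new behaviour):

# triggered_by_git_tag set                -> RELEASE (the tag drives promotion)
# is_evg_patch() and is_running_in_evg()  -> PATCH (both must now hold)
# is_running_in_evg() alone               -> DEVELOPMENT for now; reserved for STAGING
#                                            once the commented-out branch lands
# otherwise                               -> DEVELOPMENT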
repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes"], platforms=["linux/amd64"], version=version, dockerfile_path="docker/mongodb-kubernetes-operator/Dockerfile.atomic", - sign=False, ), "init-database": ImageInfo( repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-database"], platforms=["linux/amd64"], version=version, dockerfile_path="docker/mongodb-kubernetes-init-database/Dockerfile.atomic", - sign=False, ), "init-appdb": ImageInfo( repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-appdb"], platforms=["linux/amd64"], version=version, dockerfile_path="docker/mongodb-kubernetes-init-appdb/Dockerfile.atomic", - sign=False, ), "init-ops-manager": ImageInfo( repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-ops-manager"], platforms=["linux/amd64"], version=version, dockerfile_path="docker/mongodb-kubernetes-init-ops-manager/Dockerfile.atomic", - sign=False, ), "database": ImageInfo( repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-database"], platforms=["linux/amd64"], version=version, dockerfile_path="docker/mongodb-kubernetes-database/Dockerfile.atomic", - sign=False, ), "mco-tests": ImageInfo( repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-community-tests"], platforms=["linux/amd64"], version=version, dockerfile_path="docker/mongodb-community-tests/Dockerfile", - sign=False, ), "meko-tests": ImageInfo( repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-tests"], platforms=["linux/amd64"], version=version, dockerfile_path="docker/mongodb-kubernetes-tests/Dockerfile", - sign=False, ), "readiness-probe": ImageInfo( repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-readinessprobe"], platforms=["linux/amd64"], version=version, dockerfile_path="docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic", - sign=False, ), "upgrade-hook": ImageInfo( repositories=[ @@ -87,21 +78,18 @@ def test_load_build_info_development(git_repo: Repo): platforms=["linux/amd64"], version=version, dockerfile_path="docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic", - sign=False, ), "agent": ImageInfo( repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent"], platforms=["linux/amd64"], version=version, dockerfile_path="docker/mongodb-agent/Dockerfile.atomic", - sign=False, ), "ops-manager": ImageInfo( repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-enterprise-ops-manager"], platforms=["linux/amd64"], version="om-version-from-release.json", dockerfile_path="docker/mongodb-enterprise-ops-manager/Dockerfile.atomic", - sign=False, ), }, binaries={ @@ -109,14 +97,12 @@ def test_load_build_info_development(git_repo: Repo): s3_store="s3://kubectl-mongodb/dev", platforms=["linux/amd64"], version=version, - sign=False, ) }, helm_charts={ "mongodb-kubernetes": HelmChartInfo( - repository=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/helm-charts"], + repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/helm-charts"], version=version, - sign=False, ) }, ) @@ -137,63 +123,54 @@ def test_load_build_info_patch(git_repo: Repo): platforms=["linux/amd64"], version=patch_id, dockerfile_path="docker/mongodb-kubernetes-operator/Dockerfile.atomic", - sign=False, ), "operator-race": ImageInfo( repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes"], platforms=["linux/amd64"], 
@@ -137,63 +123,54 @@ def test_load_build_info_patch(git_repo: Repo):
                 platforms=["linux/amd64"],
                 version=patch_id,
                 dockerfile_path="docker/mongodb-kubernetes-operator/Dockerfile.atomic",
-                sign=False,
             ),
             "operator-race": ImageInfo(
                 repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes"],
                 platforms=["linux/amd64"],
                 version=patch_id,
                 dockerfile_path="docker/mongodb-kubernetes-operator/Dockerfile.atomic",
-                sign=False,
             ),
             "init-database": ImageInfo(
                 repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-database"],
                 platforms=["linux/amd64"],
                 version=patch_id,
                 dockerfile_path="docker/mongodb-kubernetes-init-database/Dockerfile.atomic",
-                sign=False,
             ),
             "init-appdb": ImageInfo(
                 repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-appdb"],
                 platforms=["linux/amd64"],
                 version=patch_id,
                 dockerfile_path="docker/mongodb-kubernetes-init-appdb/Dockerfile.atomic",
-                sign=False,
             ),
             "init-ops-manager": ImageInfo(
                 repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-init-ops-manager"],
                 platforms=["linux/amd64"],
                 version=patch_id,
                 dockerfile_path="docker/mongodb-kubernetes-init-ops-manager/Dockerfile.atomic",
-                sign=False,
             ),
             "database": ImageInfo(
                 repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-database"],
                 platforms=["linux/amd64"],
                 version=patch_id,
                 dockerfile_path="docker/mongodb-kubernetes-database/Dockerfile.atomic",
-                sign=False,
             ),
             "mco-tests": ImageInfo(
                 repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-community-tests"],
                 platforms=["linux/amd64"],
                 version=patch_id,
                 dockerfile_path="docker/mongodb-community-tests/Dockerfile",
-                sign=False,
             ),
             "meko-tests": ImageInfo(
                 repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-tests"],
                 platforms=["linux/amd64"],
                 version=patch_id,
                 dockerfile_path="docker/mongodb-kubernetes-tests/Dockerfile",
-                sign=False,
             ),
             "readiness-probe": ImageInfo(
                 repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-kubernetes-readinessprobe"],
                 platforms=["linux/amd64"],
                 version=patch_id,
                 dockerfile_path="docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic",
-                sign=False,
             ),
             "upgrade-hook": ImageInfo(
                 repositories=[
@@ -202,21 +179,18 @@ def test_load_build_info_patch(git_repo: Repo):
                 platforms=["linux/amd64"],
                 version=patch_id,
                 dockerfile_path="docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic",
-                sign=False,
             ),
             "agent": ImageInfo(
                 repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-agent"],
                 platforms=["linux/amd64"],
                 version=patch_id,
                 dockerfile_path="docker/mongodb-agent/Dockerfile.atomic",
-                sign=False,
             ),
             "ops-manager": ImageInfo(
                 repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/mongodb-enterprise-ops-manager"],
                 platforms=["linux/amd64"],
                 version="om-version-from-release.json",
                 dockerfile_path="docker/mongodb-enterprise-ops-manager/Dockerfile.atomic",
-                sign=False,
             ),
         },
         binaries={
@@ -224,14 +198,12 @@ def test_load_build_info_patch(git_repo: Repo):
                 s3_store="s3://kubectl-mongodb/dev",
                 platforms=["linux/amd64"],
                 version=patch_id,
-                sign=False,
             )
         },
         helm_charts={
             "mongodb-kubernetes": HelmChartInfo(
-                repository=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/helm-charts"],
+                repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/dev/helm-charts"],
                 version=patch_id,
-                sign=False,
             )
         },
     )
@@ -267,6 +239,7 @@ def test_load_build_info_staging(git_repo: Repo):
                 platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"],
                 version=expected_commit_sha,
                 dockerfile_path="docker/mongodb-kubernetes-init-database/Dockerfile.atomic",
+                latest_tag=True,
                 sign=True,
             ),
             "init-appdb": ImageInfo(
@@ -274,6 +247,7 @@
                 platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"],
                 version=expected_commit_sha,
                 dockerfile_path="docker/mongodb-kubernetes-init-appdb/Dockerfile.atomic",
+                latest_tag=True,
                 sign=True,
             ),
             "init-ops-manager": ImageInfo(
@@ -283,6 +257,7 @@
                 platforms=["linux/amd64"],
                 version=expected_commit_sha,
                 dockerfile_path="docker/mongodb-kubernetes-init-ops-manager/Dockerfile.atomic",
+                latest_tag=True,
                 sign=True,
             ),
             "database": ImageInfo(
@@ -290,6 +265,7 @@
                 platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"],
                 version=expected_commit_sha,
                 dockerfile_path="docker/mongodb-kubernetes-database/Dockerfile.atomic",
+                latest_tag=True,
                 sign=True,
             ),
             "mco-tests": ImageInfo(
@@ -297,20 +273,19 @@
                 platforms=["linux/amd64"],
                 version=expected_commit_sha,
                 dockerfile_path="docker/mongodb-community-tests/Dockerfile",
-                sign=False,
             ),
             "meko-tests": ImageInfo(
                 repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-tests"],
                 platforms=["linux/arm64", "linux/amd64"],
                 version=expected_commit_sha,
                 dockerfile_path="docker/mongodb-kubernetes-tests/Dockerfile",
-                sign=False,
             ),
             "readiness-probe": ImageInfo(
                 repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/mongodb-kubernetes-readinessprobe"],
                 platforms=["linux/arm64", "linux/amd64"],
                 version=expected_commit_sha,
                 dockerfile_path="docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic",
+                latest_tag=True,
                 sign=True,
             ),
             "upgrade-hook": ImageInfo(
@@ -320,6 +295,7 @@
                 platforms=["linux/arm64", "linux/amd64"],
                 version=expected_commit_sha,
                 dockerfile_path="docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic",
+                latest_tag=True,
                 sign=True,
             ),
             "agent": ImageInfo(
@@ -347,7 +323,7 @@
         },
         helm_charts={
             "mongodb-kubernetes": HelmChartInfo(
-                repository=["268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/helm-charts"],
+                repositories=["268558157000.dkr.ecr.us-east-1.amazonaws.com/staging/helm-charts"],
                 version=expected_commit_sha,
                 sign=True,
             )
         },
     )
@@ -372,6 +348,7 @@ def test_load_build_info_release(
                 platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"],
                 version=version,
                 dockerfile_path="docker/mongodb-kubernetes-operator/Dockerfile.atomic",
+                olm_tag=True,
                 sign=True,
             ),
             "init-database": ImageInfo(
@@ -379,6 +356,7 @@
                 platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"],
                 version=version,
                 dockerfile_path="docker/mongodb-kubernetes-init-database/Dockerfile.atomic",
+                olm_tag=True,
                 sign=True,
             ),
             "init-appdb": ImageInfo(
@@ -386,6 +364,7 @@
                 platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"],
                 version=version,
                 dockerfile_path="docker/mongodb-kubernetes-init-appdb/Dockerfile.atomic",
+                olm_tag=True,
                 sign=True,
             ),
             "init-ops-manager": ImageInfo(
@@ -393,6 +372,7 @@
                 platforms=["linux/amd64"],
                 version=version,
                 dockerfile_path="docker/mongodb-kubernetes-init-ops-manager/Dockerfile.atomic",
+                olm_tag=True,
                 sign=True,
             ),
             "database": ImageInfo(
@@ -400,6 +380,7 @@
                 platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"],
                 version=version,
                 dockerfile_path="docker/mongodb-kubernetes-database/Dockerfile.atomic",
+                olm_tag=True,
                 sign=True,
             ),
             "readiness-probe": ImageInfo(
platforms=["linux/arm64", "linux/amd64"], version=readinessprobe_version, dockerfile_path="docker/mongodb-kubernetes-readinessprobe/Dockerfile.atomic", + olm_tag=True, sign=True, ), "upgrade-hook": ImageInfo( @@ -414,6 +396,7 @@ def test_load_build_info_release( platforms=["linux/arm64", "linux/amd64"], version=operator_version_upgrade_post_start_hook_version, dockerfile_path="docker/mongodb-kubernetes-upgrade-hook/Dockerfile.atomic", + olm_tag=True, sign=True, ), }, @@ -427,7 +410,7 @@ def test_load_build_info_release( }, helm_charts={ "mongodb-kubernetes": HelmChartInfo( - repository=["quay.io/mongodb/helm-charts"], + repositories=["quay.io/mongodb/helm-charts"], version=version, sign=True, ) @@ -450,6 +433,7 @@ def test_load_build_info_manual_release(git_repo: Repo): platforms=["linux/arm64", "linux/amd64", "linux/s390x", "linux/ppc64le"], version=None, # Version is None for manual_release scenario dockerfile_path="docker/mongodb-agent/Dockerfile.atomic", + olm_tag=True, sign=True, ), "ops-manager": ImageInfo( @@ -457,6 +441,7 @@ def test_load_build_info_manual_release(git_repo: Repo): platforms=["linux/amd64"], version=None, # Version is None for manual_release scenario dockerfile_path="docker/mongodb-enterprise-ops-manager/Dockerfile.atomic", + olm_tag=True, sign=True, ), },