From 4683e582bf8e677e2f38c818edb551096f92edab Mon Sep 17 00:00:00 2001 From: Nick Cross Date: Mon, 7 Oct 2024 17:17:22 +0100 Subject: [PATCH 1/8] Enable Tekton alpha features --- deploy/minikube-development.sh | 1 + deploy/openshift-ci.sh | 1 + deploy/openshift-development.sh | 2 ++ 3 files changed, 4 insertions(+) diff --git a/deploy/minikube-development.sh b/deploy/minikube-development.sh index 87e87ab5f..a4d0da0d7 100755 --- a/deploy/minikube-development.sh +++ b/deploy/minikube-development.sh @@ -20,6 +20,7 @@ while ! oc get pods -n tekton-pipelines | grep tekton-pipelines-webhook | grep " done #we need to make sure the tekton webhook has its rules installed kubectl wait --for=jsonpath='{.webhooks[0].rules}' --timeout=300s mutatingwebhookconfigurations.admissionregistration.k8s.io webhook.pipeline.tekton.dev +kubectl patch cm feature-flags -n tekton-pipelines -p '{"data":{"enable-api-fields":"alpha"}}' echo -e "\033[0;32mTekton controller is running\033[0m" #CRDS are sometimes racey diff --git a/deploy/openshift-ci.sh b/deploy/openshift-ci.sh index 09aeec72c..b06f1af65 100755 --- a/deploy/openshift-ci.sh +++ b/deploy/openshift-ci.sh @@ -30,6 +30,7 @@ oc apply -f ${DIR}/base/pipelines/openshift-pipelines-subscription.yaml waitFor "oc get ns openshift-pipelines" waitFor "oc get pods -n openshift-pipelines | grep tekton-pipelines-controller | grep Running" waitFor "oc get mutatingwebhookconfigurations.admissionregistration.k8s.io webhook.pipeline.tekton.dev -o yaml | grep rules" +oc patch tektonconfigs.operator.tekton.dev config --type=merge -p '{"spec":{"pipeline":{"enable-api-fields":"alpha"}}}' echo "Tekton controller is running" oc create ns jvm-build-service || true diff --git a/deploy/openshift-development.sh b/deploy/openshift-development.sh index cba53d300..ea84e6ae5 100755 --- a/deploy/openshift-development.sh +++ b/deploy/openshift-development.sh @@ -21,6 +21,8 @@ fi DIR=`dirname $0` $DIR/base-development.sh $1 +kubectl patch tektonconfigs.operator.tekton.dev config --type=merge -p '{"spec":{"pipeline":{"enable-api-fields":"alpha"}}}' + if [ "$deploy_maven_repo" = true ]; then export REPOSILITE_IMAGE=$(sed -n 's/^FROM //p' $DIR/../openshift-with-appstudio-test/e2e/Dockerfile.reposilite) kubectl delete -f $DIR/maven-repo.yaml --ignore-not-found From cbc2848c54066e96dd7a4798ba7e48077cb7feeb Mon Sep 17 00:00:00 2001 From: Nick Cross Date: Mon, 7 Oct 2024 20:26:04 +0100 Subject: [PATCH 2/8] Move task definitions to systemconfig and update to use main branch --- pkg/apis/jvmbuildservice/v1alpha1/jbsconfig_types.go | 5 ----- pkg/apis/jvmbuildservice/v1alpha1/systemconfig_types.go | 7 +++++++ pkg/reconciler/dependencybuild/buildrecipeyaml.go | 2 +- pkg/reconciler/dependencybuild/dependencybuild.go | 2 +- 4 files changed, 9 insertions(+), 7 deletions(-) diff --git a/pkg/apis/jvmbuildservice/v1alpha1/jbsconfig_types.go b/pkg/apis/jvmbuildservice/v1alpha1/jbsconfig_types.go index 8efefc84f..0141a3b6d 100644 --- a/pkg/apis/jvmbuildservice/v1alpha1/jbsconfig_types.go +++ b/pkg/apis/jvmbuildservice/v1alpha1/jbsconfig_types.go @@ -32,11 +32,6 @@ const ( ConfigArtifactCacheIOThreadsDefault = "4" ConfigArtifactCacheWorkerThreadsDefault = "50" ConfigArtifactCacheStorageDefault = "10Gi" - - KonfluxGitDefinition = "https://raw.githubusercontent.com/konflux-ci/build-definitions/refs/heads/main/task/git-clone/0.1/git-clone.yaml" - KonfluxPreBuildDefinitions = "https://raw.githubusercontent.com/rnc/jvm-build-service/PODTEMPLATE/deploy/tasks/pre-build.yaml" - KonfluxBuildDefinitions = 
"https://raw.githubusercontent.com/redhat-appstudio/jvm-build-service/main/deploy/tasks/buildah-oci-ta.yaml" - KonfluxMavenDeployDefinitions = "https://raw.githubusercontent.com/rnc/jvm-build-service/PODTEMPLATE/deploy/tasks/maven-deployment.yaml" ) type JBSConfigSpec struct { diff --git a/pkg/apis/jvmbuildservice/v1alpha1/systemconfig_types.go b/pkg/apis/jvmbuildservice/v1alpha1/systemconfig_types.go index 7d82d14cc..8f6a33fd7 100644 --- a/pkg/apis/jvmbuildservice/v1alpha1/systemconfig_types.go +++ b/pkg/apis/jvmbuildservice/v1alpha1/systemconfig_types.go @@ -44,3 +44,10 @@ type SystemConfigList struct { metav1.ListMeta `json:"metadata,omitempty"` Items []SystemConfig `json:"items"` } + +const ( + KonfluxGitDefinition = "https://raw.githubusercontent.com/konflux-ci/build-definitions/refs/heads/main/task/git-clone/0.1/git-clone.yaml" + KonfluxPreBuildDefinitions = "https://raw.githubusercontent.com/redhat-appstudio/jvm-build-service/main/deploy/tasks/pre-build.yaml" + KonfluxBuildDefinitions = "https://raw.githubusercontent.com/konflux-ci/build-definitions/refs/heads/main/task/buildah-oci-ta/0.2/buildah-oci-ta.yaml" + KonfluxMavenDeployDefinitions = "https://raw.githubusercontent.com/redhat-appstudio/jvm-build-service/main/deploy/tasks/maven-deployment.yaml" +) diff --git a/pkg/reconciler/dependencybuild/buildrecipeyaml.go b/pkg/reconciler/dependencybuild/buildrecipeyaml.go index 8a0095c24..a993c8749 100644 --- a/pkg/reconciler/dependencybuild/buildrecipeyaml.go +++ b/pkg/reconciler/dependencybuild/buildrecipeyaml.go @@ -61,7 +61,7 @@ var buildEntryScript string //go:embed scripts/Dockerfile.build-trusted-artifacts var buildTrustedArtifacts string -func createDeployPipelineSpec(jbsConfig *v1alpha1.JBSConfig, buildRequestProcessorImage string, orasOptions string) (*tektonpipeline.PipelineSpec, error) { +func createDeployPipelineSpec(jbsConfig *v1alpha1.JBSConfig, buildRequestProcessorImage string) (*tektonpipeline.PipelineSpec, error) { // Original deploy pipeline used to run maven deployment and also tag the images using 'oras tag' // with the SHA256 encoded sum of the GAVs. resolver := tektonpipeline.ResolverRef{ diff --git a/pkg/reconciler/dependencybuild/dependencybuild.go b/pkg/reconciler/dependencybuild/dependencybuild.go index 1093d7f92..bae85e3fe 100644 --- a/pkg/reconciler/dependencybuild/dependencybuild.go +++ b/pkg/reconciler/dependencybuild/dependencybuild.go @@ -1424,7 +1424,7 @@ func (r *ReconcileDependencyBuild) handleStateDeploying(ctx context.Context, db Pipeline: &v12.Duration{Duration: time.Hour * v1alpha1.DefaultTimeout}, Tasks: &v12.Duration{Duration: time.Hour * v1alpha1.DefaultTimeout}, } - pr.Spec.PipelineSpec, err = createDeployPipelineSpec(jbsConfig, buildRequestProcessorImage, orasOptions) + pr.Spec.PipelineSpec, err = createDeployPipelineSpec(jbsConfig, buildRequestProcessorImage) if err != nil { return reconcile.Result{}, err } From ca560aeaad1b2007366883d85baff1893fcd63cc Mon Sep 17 00:00:00 2001 From: Nick Cross Date: Tue, 8 Oct 2024 12:54:48 +0100 Subject: [PATCH 3/8] Use a computeresources for the pipeline. 
--- deploy/tasks/buildah-oci-ta.yaml | 11 --------- .../dependencybuild/buildrecipeyaml.go | 18 +++++++-------- .../dependencybuild/dependencybuild.go | 23 +++++++++++++++++++ pkg/reconciler/jbsconfig/jbsconfig.go | 2 +- 4 files changed, 33 insertions(+), 21 deletions(-) diff --git a/deploy/tasks/buildah-oci-ta.yaml b/deploy/tasks/buildah-oci-ta.yaml index 8b69c7ac1..f7501409a 100644 --- a/deploy/tasks/buildah-oci-ta.yaml +++ b/deploy/tasks/buildah-oci-ta.yaml @@ -72,10 +72,6 @@ spec: hours, days, and weeks, respectively. type: string default: "" - - name: CACHE_URL - type: string - description: For JBS, URL of the cache. - default: "" - name: PREFETCH_INPUT description: In case it is not empty, the prefetched content should be made available to the build. @@ -245,8 +241,6 @@ spec: env: - name: COMMIT_SHA value: $(params.COMMIT_SHA) - - name: CACHE_URL - value: $(params.CACHE_URL) script: | #!/bin/bash set -e @@ -398,11 +392,6 @@ spec: done < <(find $ADDITIONAL_SECRET_TMP -maxdepth 1 -type f -exec basename {} \;) fi - # TODO: Rename to JBS_CACHE_URL? - if [ -n "$CACHE_URL" ]; then - BUILDAH_ARGS+=("--build-arg=CACHE_URL=$CACHE_URL") - fi - unshare -Uf $UNSHARE_ARGS --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -w ${SOURCE_CODE_DIR}/$CONTEXT -- buildah build \ $VOLUME_MOUNTS \ "${BUILDAH_ARGS[@]}" \ diff --git a/pkg/reconciler/dependencybuild/buildrecipeyaml.go b/pkg/reconciler/dependencybuild/buildrecipeyaml.go index a993c8749..3e0362692 100644 --- a/pkg/reconciler/dependencybuild/buildrecipeyaml.go +++ b/pkg/reconciler/dependencybuild/buildrecipeyaml.go @@ -29,7 +29,7 @@ const ( PreBuildTaskName = "pre-build" BuildTaskName = "build" PostBuildTaskName = "post-build" - TagTaskName = "tag" + DeployTaskName = "deploy" ) //go:embed scripts/maven-build.sh @@ -81,7 +81,7 @@ func createDeployPipelineSpec(jbsConfig *v1alpha1.JBSConfig, buildRequestProcess Params: []tektonpipeline.ParamSpec{{Name: PipelineResultImageDigest, Type: tektonpipeline.ParamTypeString}}, Tasks: []tektonpipeline.PipelineTask{ { - Name: TagTaskName, + Name: DeployTaskName, TaskRef: &tektonpipeline.TaskRef{ // Can't specify name and resolver as they clash. ResolverRef: resolver, @@ -527,13 +527,13 @@ func createPipelineSpec(log logr.Logger, tool string, commitTime int64, jbsConfi StringVal: tlsVerify, }, }, - //{ - // Name: "BUILD_ARGS", - // Value: tektonpipeline.ParamValue{ - // Type: tektonpipeline.ParamTypeArray, - // ArrayVal: []string{"CACHE_URL=" + cacheUrl}, - // }, - //}, + { + Name: "BUILD_ARGS", + Value: tektonpipeline.ParamValue{ + Type: tektonpipeline.ParamTypeArray, + ArrayVal: []string{"CACHE_URL=" + cacheUrl}, + }, + }, }, // TODO: ### How to pass build-settings/tls information to buildah task? diff --git a/pkg/reconciler/dependencybuild/dependencybuild.go b/pkg/reconciler/dependencybuild/dependencybuild.go index bae85e3fe..17a0451ce 100644 --- a/pkg/reconciler/dependencybuild/dependencybuild.go +++ b/pkg/reconciler/dependencybuild/dependencybuild.go @@ -636,6 +636,7 @@ func (r *ReconcileDependencyBuild) handleStateBuilding(ctx context.Context, db * }, }}, } + // TODO: ### Enclose this within an annotation to denote test CI system in use? pr.Spec.TaskRunTemplate = tektonpipeline.PipelineTaskRunTemplate{ PodTemplate: &pod.Template{ Env: []v1.EnvVar{ @@ -646,6 +647,17 @@ func (r *ReconcileDependencyBuild) handleStateBuilding(ctx context.Context, db * }, }, } + // TODO: ### Enclose this within an annotation to denote test CI system in use? 
+ podMemR, _ := resource.ParseQuantity("1792Mi") + podMemL, _ := resource.ParseQuantity("3584Mi") + podCPU, _ := resource.ParseQuantity("500m") + pr.Spec.TaskRunSpecs = []tektonpipeline.PipelineTaskRunSpec{{ + PipelineTaskName: BuildTaskName, + ComputeResources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{"memory": podMemR, "cpu": podCPU}, + Limits: v1.ResourceList{"memory": podMemL, "cpu": podCPU}, + }, + }} if !jbsConfig.Spec.CacheSettings.DisableTLS { pr.Spec.Workspaces = append(pr.Spec.Workspaces, tektonpipeline.WorkspaceBinding{Name: "tls", ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: v1alpha1.TlsConfigMapName}}}) @@ -1438,6 +1450,7 @@ func (r *ReconcileDependencyBuild) handleStateDeploying(ctx context.Context, db pr.Spec.Workspaces = append(pr.Spec.Workspaces, tektonpipeline.WorkspaceBinding{Name: "tls", EmptyDir: &v1.EmptyDirVolumeSource{}}) } pr.Spec.Timeouts = &tektonpipeline.TimeoutFields{Pipeline: &v12.Duration{Duration: time.Hour * v1alpha1.DefaultTimeout}} + // TODO: ### Enclose this within an annotation to denote test CI system in use? Could inline orasOptions then as well? pr.Spec.TaskRunTemplate = tektonpipeline.PipelineTaskRunTemplate{ PodTemplate: &pod.Template{ Env: []v1.EnvVar{ @@ -1448,6 +1461,16 @@ func (r *ReconcileDependencyBuild) handleStateDeploying(ctx context.Context, db }, }, } + // TODO: ### Enclose this within an annotation to denote test CI system in use? + podMem, _ := resource.ParseQuantity("1024Mi") + podCPU, _ := resource.ParseQuantity("250m") + pr.Spec.TaskRunSpecs = []tektonpipeline.PipelineTaskRunSpec{{ + PipelineTaskName: DeployTaskName, + ComputeResources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{"memory": podMem, "cpu": podCPU}, + Limits: v1.ResourceList{"memory": podMem, "cpu": podCPU}, + }, + }} if err := controllerutil.SetOwnerReference(db, &pr, r.scheme); err != nil { return reconcile.Result{}, err diff --git a/pkg/reconciler/jbsconfig/jbsconfig.go b/pkg/reconciler/jbsconfig/jbsconfig.go index 314c7ab81..e8aec8009 100644 --- a/pkg/reconciler/jbsconfig/jbsconfig.go +++ b/pkg/reconciler/jbsconfig/jbsconfig.go @@ -35,7 +35,7 @@ import ( const ( TlsServiceName = v1alpha1.CacheDeploymentName + "-tls" - TestRegistry = "jvmbuildservice.io/test-registry" + TestRegistry = "jvmbuildservice.io/test-registry" // Denote using an insecure registry in GitHub Actions RetryTimeAnnotations = "jvmbuildservice.io/retry-time" RetryTimestampAnnotations = "jvmbuildservice.io/retry-timestamp" ) From b9c1bcef1ddad1d6b72b253d4947938bbf301a31 Mon Sep 17 00:00:00 2001 From: Nick Cross Date: Wed, 9 Oct 2024 08:54:59 +0100 Subject: [PATCH 4/8] Remove buildah-oci-ta task --- deploy/tasks/buildah-oci-ta.yaml | 601 ------------------------------- 1 file changed, 601 deletions(-) delete mode 100644 deploy/tasks/buildah-oci-ta.yaml diff --git a/deploy/tasks/buildah-oci-ta.yaml b/deploy/tasks/buildah-oci-ta.yaml deleted file mode 100644 index f7501409a..000000000 --- a/deploy/tasks/buildah-oci-ta.yaml +++ /dev/null @@ -1,601 +0,0 @@ ---- -apiVersion: tekton.dev/v1 -kind: Task -metadata: - name: buildah-oci-ta - annotations: - tekton.dev/pipelines.minVersion: 0.12.1 - tekton.dev/tags: image-build, konflux - labels: - app.kubernetes.io/version: "0.2" - build.appstudio.redhat.com/build_type: docker -spec: - description: |- - Buildah task builds source code into a container image and pushes the image into container registry using buildah tool. 
- In addition it generates a SBOM file, injects the SBOM file into final container image and pushes the SBOM file as separate image using cosign tool. - When [Java dependency rebuild](https://redhat-appstudio.github.io/docs.stonesoup.io/Documentation/main/cli/proc_enabled_java_dependencies.html) is enabled it triggers rebuilds of Java artifacts. - When prefetch-dependencies task was activated it is using its artifacts to run build in hermetic environment. - params: - - name: ACTIVATION_KEY - description: Name of secret which contains subscription activation key - type: string - default: activation-key - - name: ADDITIONAL_SECRET - description: Name of a secret which will be made available to the build - with 'buildah build --secret' at /run/secrets/$ADDITIONAL_SECRET - type: string - default: does-not-exist - - name: ADD_CAPABILITIES - description: Comma separated list of extra capabilities to add when - running 'buildah build' - type: string - default: "" - - name: BUILD_ARGS - description: Array of --build-arg values ("arg=value" strings) - type: array - default: [] - - name: BUILD_ARGS_FILE - description: Path to a file with build arguments, see https://www.mankier.com/1/buildah-build#--build-arg-file - type: string - default: "" - - name: CACHI2_ARTIFACT - description: The Trusted Artifact URI pointing to the artifact with - the prefetched dependencies. - type: string - default: "" - - name: COMMIT_SHA - description: The image is built from this commit. - type: string - default: "" - - name: CONTEXT - description: Path to the directory to use as context. - type: string - default: . - - name: DOCKERFILE - description: Path to the Dockerfile to build. - type: string - default: ./Dockerfile - - name: ENTITLEMENT_SECRET - description: Name of secret which contains the entitlement certificates - type: string - default: etc-pki-entitlement - - name: HERMETIC - description: Determines if build will be executed without network access. - type: string - default: "false" - - name: IMAGE - description: Reference of the image buildah will produce. - type: string - - name: IMAGE_EXPIRES_AFTER - description: Delete image tag after specified time. Empty means to keep - the image tag. Time values could be something like 1h, 2d, 3w for - hours, days, and weeks, respectively. - type: string - default: "" - - name: PREFETCH_INPUT - description: In case it is not empty, the prefetched content should - be made available to the build. - type: string - default: "" - - name: SKIP_UNUSED_STAGES - description: Whether to skip stages in Containerfile that seem unused - by subsequent stages - type: string - default: "true" - - name: SOURCE_ARTIFACT - description: The Trusted Artifact URI pointing to the artifact with - the application source code. - type: string - - name: SQUASH - description: Squash all new and previous layers added as a part of this - build, as per --squash - type: string - default: "false" - - name: STORAGE_DRIVER - description: Storage driver to configure for buildah - type: string - default: vfs - - name: TARGET_STAGE - description: Target stage in Dockerfile to build. If not specified, - the Dockerfile is processed entirely to (and including) its last stage. 
- type: string - default: "" - - name: TLSVERIFY - description: Verify the TLS on the registry endpoint (for push/pull - to a non-TLS registry) - type: string - default: "true" - - name: YUM_REPOS_D_FETCHED - description: Path in source workspace where dynamically-fetched repos - are present - default: fetched.repos.d - - name: YUM_REPOS_D_SRC - description: Path in the git repository in which yum repository files - are stored - default: repos.d - - name: YUM_REPOS_D_TARGET - description: Target path on the container in which yum repository files - should be made available - default: /etc/yum.repos.d - - name: caTrustConfigMapKey - description: The name of the key in the ConfigMap that contains the - CA bundle data. - type: string - default: ca-bundle.crt - - name: caTrustConfigMapName - description: The name of the ConfigMap to read CA bundle data from. - type: string - default: trusted-ca - results: - - name: IMAGE_DIGEST - description: Digest of the image just built - - name: IMAGE_REF - description: Image reference of the built image - - name: IMAGE_URL - description: Image repository and tag where the built image was pushed - - name: JAVA_COMMUNITY_DEPENDENCIES - description: The Java dependencies that came from community sources - such as Maven central. - - name: SBOM_BLOB_URL - description: Reference of SBOM blob digest to enable digest-based verification - from provenance - type: string - - name: SBOM_JAVA_COMPONENTS_COUNT - description: The counting of Java components by publisher in JSON format - type: string - volumes: - - name: activation-key - secret: - optional: true - secretName: $(params.ACTIVATION_KEY) - - name: additional-secret - secret: - optional: true - secretName: $(params.ADDITIONAL_SECRET) - - name: etc-pki-entitlement - secret: - optional: true - secretName: $(params.ENTITLEMENT_SECRET) - - name: shared - emptyDir: {} - - name: trusted-ca - configMap: - items: - - key: $(params.caTrustConfigMapKey) - path: ca-bundle.crt - name: $(params.caTrustConfigMapName) - optional: true - - name: varlibcontainers - emptyDir: {} - - name: workdir - emptyDir: {} - stepTemplate: - env: - - name: ACTIVATION_KEY - value: $(params.ACTIVATION_KEY) - - name: ADDITIONAL_SECRET - value: $(params.ADDITIONAL_SECRET) - - name: ADD_CAPABILITIES - value: $(params.ADD_CAPABILITIES) - - name: BUILDAH_FORMAT - value: oci - - name: BUILD_ARGS_FILE - value: $(params.BUILD_ARGS_FILE) - - name: CONTEXT - value: $(params.CONTEXT) - - name: DOCKERFILE - value: $(params.DOCKERFILE) - - name: ENTITLEMENT_SECRET - value: $(params.ENTITLEMENT_SECRET) - - name: HERMETIC - value: $(params.HERMETIC) - - name: IMAGE - value: $(params.IMAGE) - - name: IMAGE_EXPIRES_AFTER - value: $(params.IMAGE_EXPIRES_AFTER) - - name: SKIP_UNUSED_STAGES - value: $(params.SKIP_UNUSED_STAGES) - - name: SQUASH - value: $(params.SQUASH) - - name: STORAGE_DRIVER - value: $(params.STORAGE_DRIVER) - - name: TARGET_STAGE - value: $(params.TARGET_STAGE) - - name: TLSVERIFY - value: $(params.TLSVERIFY) - - name: YUM_REPOS_D_FETCHED - value: $(params.YUM_REPOS_D_FETCHED) - - name: YUM_REPOS_D_SRC - value: $(params.YUM_REPOS_D_SRC) - - name: YUM_REPOS_D_TARGET - value: $(params.YUM_REPOS_D_TARGET) - volumeMounts: - - mountPath: /shared - name: shared - - mountPath: /var/workdir - name: workdir - steps: - - name: use-trusted-artifact - image: quay.io/redhat-appstudio/build-trusted-artifacts:latest@sha256:62bb85585c0ebb8b333ef716b87b70838fb27723657c95ffb62d446b0d28ab68 - args: - - use - - $(params.SOURCE_ARTIFACT)=/var/workdir/source - - 
$(params.CACHI2_ARTIFACT)=/var/workdir/cachi2 - - name: build - image: quay.io/konflux-ci/buildah-task:latest@sha256:5cbd487022fb7ac476cbfdea25513b810f7e343ec48f89dc6a4e8c3c39fa37a2 - args: - - $(params.BUILD_ARGS[*]) - workingDir: /var/workdir - volumeMounts: - - mountPath: /var/lib/containers - name: varlibcontainers - - mountPath: /entitlement - name: etc-pki-entitlement - - mountPath: /activation-key - name: activation-key - - mountPath: /additional-secret - name: additional-secret - - mountPath: /mnt/trusted-ca - name: trusted-ca - readOnly: true - env: - - name: COMMIT_SHA - value: $(params.COMMIT_SHA) - script: | - #!/bin/bash - set -e - ca_bundle=/mnt/trusted-ca/ca-bundle.crt - if [ -f "$ca_bundle" ]; then - echo "INFO: Using mounted CA bundle: $ca_bundle" - cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors - update-ca-trust - fi - - SOURCE_CODE_DIR=source - if [ -e "$SOURCE_CODE_DIR/$CONTEXT/$DOCKERFILE" ]; then - dockerfile_path="$(pwd)/$SOURCE_CODE_DIR/$CONTEXT/$DOCKERFILE" - elif [ -e "$SOURCE_CODE_DIR/$DOCKERFILE" ]; then - dockerfile_path="$(pwd)/$SOURCE_CODE_DIR/$DOCKERFILE" - elif echo "$DOCKERFILE" | grep -q "^https\?://"; then - echo "Fetch Dockerfile from $DOCKERFILE" - dockerfile_path=$(mktemp --suffix=-Dockerfile) - http_code=$(curl -s -L -w "%{http_code}" --output "$dockerfile_path" "$DOCKERFILE") - if [ $http_code != 200 ]; then - echo "No Dockerfile is fetched. Server responds $http_code" - exit 1 - fi - http_code=$(curl -s -L -w "%{http_code}" --output "$dockerfile_path.dockerignore.tmp" "$DOCKERFILE.dockerignore") - if [ $http_code = 200 ]; then - echo "Fetched .dockerignore from $DOCKERFILE.dockerignore" - mv "$dockerfile_path.dockerignore.tmp" $SOURCE_CODE_DIR/$CONTEXT/.dockerignore - fi - else - echo "Cannot find Dockerfile $DOCKERFILE" - exit 1 - fi - if [ -n "$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR" ] && grep -q '^\s*RUN \(./\)\?mvn' "$dockerfile_path"; then - sed -i -e "s|^\s*RUN \(\(./\)\?mvn\)\(.*\)|RUN echo \"mirror.defaulthttp://$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR/v1/cache/default/0/*\" > /tmp/settings.yaml; \1 -s /tmp/settings.yaml \3|g" "$dockerfile_path" - touch /var/lib/containers/java - fi - - # Fixing group permission on /var/lib/containers - chown root:root /var/lib/containers - - sed -i 's/^\s*short-name-mode\s*=\s*.*/short-name-mode = "disabled"/' /etc/containers/registries.conf - - # Setting new namespace to run buildah - 2^32-2 - echo 'root:1:4294967294' | tee -a /etc/subuid >>/etc/subgid - - BUILDAH_ARGS=() - - BASE_IMAGES=$(dockerfile-json "$dockerfile_path" | jq -r '.Stages[] | select(.From | .Stage or .Scratch | not) | .BaseName') - if [ "${HERMETIC}" == "true" ]; then - BUILDAH_ARGS+=("--pull=never") - UNSHARE_ARGS="--net" - for image in $BASE_IMAGES; do - unshare -Ufp --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -- buildah pull $image - done - echo "Build will be executed with network isolation" - fi - - if [ -n "${TARGET_STAGE}" ]; then - BUILDAH_ARGS+=("--target=${TARGET_STAGE}") - fi - - if [ -n "${BUILD_ARGS_FILE}" ]; then - BUILDAH_ARGS+=("--build-arg-file=$(pwd)/$SOURCE_CODE_DIR/${BUILD_ARGS_FILE}") - fi - - for build_arg in "$@"; do - BUILDAH_ARGS+=("--build-arg=$build_arg") - done - - if [ -n "${ADD_CAPABILITIES}" ]; then - BUILDAH_ARGS+=("--cap-add=${ADD_CAPABILITIES}") - fi - - if [ "${SQUASH}" == "true" ]; then - BUILDAH_ARGS+=("--squash") - fi - - if [ "${SKIP_UNUSED_STAGES}" != "true" ]; then - BUILDAH_ARGS+=("--skip-unused-stages=false") - fi - - if [ -f 
"/var/workdir/cachi2/cachi2.env" ]; then - cp -r "/var/workdir/cachi2" /tmp/ - chmod -R go+rwX /tmp/cachi2 - VOLUME_MOUNTS="--volume /tmp/cachi2:/cachi2" - # Read in the whole file (https://unix.stackexchange.com/questions/533277), then - # for each RUN ... line insert the cachi2.env command *after* any options like --mount - sed -E -i \ - -e 'H;1h;$!d;x' \ - -e 's@^\s*(run((\s|\\\n)+-\S+)*(\s|\\\n)+)@\1. /cachi2/cachi2.env \&\& \\\n @igM' \ - "$dockerfile_path" - echo "Prefetched content will be made available" - - prefetched_repo_for_my_arch="/tmp/cachi2/output/deps/rpm/$(uname -m)/repos.d/cachi2.repo" - if [ -f "$prefetched_repo_for_my_arch" ]; then - echo "Adding $prefetched_repo_for_my_arch to $YUM_REPOS_D_FETCHED" - mkdir -p "$YUM_REPOS_D_FETCHED" - cp --no-clobber "$prefetched_repo_for_my_arch" "$YUM_REPOS_D_FETCHED" - fi - fi - - # if yum repofiles stored in git, copy them to mount point outside the source dir - if [ -d "${SOURCE_CODE_DIR}/${YUM_REPOS_D_SRC}" ]; then - mkdir -p ${YUM_REPOS_D_FETCHED} - cp -r ${SOURCE_CODE_DIR}/${YUM_REPOS_D_SRC}/* ${YUM_REPOS_D_FETCHED} - fi - - # if anything in the repofiles mount point (either fetched or from git), mount it - if [ -d "${YUM_REPOS_D_FETCHED}" ]; then - chmod -R go+rwX ${YUM_REPOS_D_FETCHED} - mount_point=$(realpath ${YUM_REPOS_D_FETCHED}) - VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume ${mount_point}:${YUM_REPOS_D_TARGET}" - fi - - LABELS=( - "--label" "build-date=$(date -u +'%Y-%m-%dT%H:%M:%S')" - "--label" "architecture=$(uname -m)" - "--label" "vcs-type=git" - ) - [ -n "$COMMIT_SHA" ] && LABELS+=("--label" "vcs-ref=$COMMIT_SHA") - [ -n "$IMAGE_EXPIRES_AFTER" ] && LABELS+=("--label" "quay.expires-after=$IMAGE_EXPIRES_AFTER") - - ACTIVATION_KEY_PATH="/activation-key" - ENTITLEMENT_PATH="/entitlement" - - # do not enable activation key and entitlement at same time. If both vars are provided, prefer activation key. 
- # when activation keys are used an empty directory on shared emptydir volume to "/etc/pki/entitlement" to prevent certificates from being included in the produced container - # To use activation key file 'org' must exist, which means the key 'org' must exist in the key/value secret - - if [ -e /activation-key/org ]; then - cp -r --preserve=mode "$ACTIVATION_KEY_PATH" /tmp/activation-key - mkdir /shared/rhsm-tmp - VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume /tmp/activation-key:/activation-key -v /shared/rhsm-tmp:/etc/pki/entitlement:Z" - echo "Adding activation key to the build" - - elif find /entitlement -name "*.pem" >>null; then - cp -r --preserve=mode "$ENTITLEMENT_PATH" /tmp/entitlement - VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume /tmp/entitlement:/etc/pki/entitlement" - echo "Adding the entitlement to the build" - fi - - ADDITIONAL_SECRET_PATH="/additional-secret" - ADDITIONAL_SECRET_TMP="/tmp/additional-secret" - if [ -d "$ADDITIONAL_SECRET_PATH" ]; then - cp -r --preserve=mode -L "$ADDITIONAL_SECRET_PATH" $ADDITIONAL_SECRET_TMP - while read -r filename; do - echo "Adding the secret ${ADDITIONAL_SECRET}/${filename} to the build, available at /run/secrets/${ADDITIONAL_SECRET}/${filename}" - BUILDAH_ARGS+=("--secret=id=${ADDITIONAL_SECRET}/${filename},src=$ADDITIONAL_SECRET_TMP/${filename}") - done < <(find $ADDITIONAL_SECRET_TMP -maxdepth 1 -type f -exec basename {} \;) - fi - - unshare -Uf $UNSHARE_ARGS --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -w ${SOURCE_CODE_DIR}/$CONTEXT -- buildah build \ - $VOLUME_MOUNTS \ - "${BUILDAH_ARGS[@]}" \ - "${LABELS[@]}" \ - --tls-verify=$TLSVERIFY --no-cache \ - --ulimit nofile=4096:4096 \ - -f "$dockerfile_path" -t $IMAGE . - - container=$(buildah from --pull-never $IMAGE) - buildah mount $container | tee /shared/container_path - # delete symlinks - they may point outside the container rootfs, messing with SBOM scanners - find $(cat /shared/container_path) -xtype l -delete - echo $container >/shared/container_name - - # Save the SBOM produced by Cachi2 so it can be merged into the final SBOM later - if [ -f "/tmp/cachi2/output/bom.json" ]; then - cp /tmp/cachi2/output/bom.json ./sbom-cachi2.json - fi - - touch /shared/base_images_digests - for image in $BASE_IMAGES; do - buildah images --format '{{ .Name }}:{{ .Tag }}@{{ .Digest }}' --filter reference="$image" >>/shared/base_images_digests - done - - # Needed to generate base images SBOM - echo "$BASE_IMAGES" >/shared/base_images_from_dockerfile - computeResources: - limits: - cpu: "1" - memory: 2Gi - requests: - cpu: "50m" - memory: 512Mi - securityContext: - capabilities: - add: - - SETFCAP - - name: sbom-syft-generate - image: registry.access.redhat.com/rh-syft-tech-preview/syft-rhel9:1.4.1@sha256:34d7065427085a31dc4949bd283c001b91794d427e1e4cdf1b21ea4faf9fee3f - workingDir: /var/workdir/source - volumeMounts: - - mountPath: /var/lib/containers - name: varlibcontainers - - mountPath: /shared - name: shared - script: | - echo "Running syft on the source directory" - syft dir:/var/workdir/source --output cyclonedx-json=/var/workdir/sbom-source.json - echo "Running syft on the image filesystem" - syft dir:$(cat /shared/container_path) --output cyclonedx-json=/var/workdir/sbom-image.json - computeResources: - limits: - cpu: "1" - memory: 2Gi - requests: - cpu: 50m - memory: 512Mi - - name: analyse-dependencies-java-sbom - image: quay.io/redhat-appstudio/hacbs-jvm-build-request-processor:127ee0c223a2b56a9bd20a6f2eaeed3bd6015f77 - volumeMounts: - - mountPath: /var/lib/containers - name: 
varlibcontainers - - mountPath: /shared - name: shared - script: | - if [ -f /var/lib/containers/java ]; then - /opt/jboss/container/java/run/run-java.sh analyse-dependencies path $(cat /shared/container_path) -s /var/workdir/sbom-image.json --task-run-name $(context.taskRun.name) --publishers $(results.SBOM_JAVA_COMPONENTS_COUNT.path) - sed -i 's/^/ /' $(results.SBOM_JAVA_COMPONENTS_COUNT.path) # Workaround for SRVKP-2875 - else - touch $(results.JAVA_COMMUNITY_DEPENDENCIES.path) - fi - computeResources: - limits: - cpu: 100m - memory: 256Mi - requests: - cpu: 10m - memory: 128Mi - securityContext: - runAsUser: 0 - - name: prepare-sboms - image: quay.io/redhat-appstudio/sbom-utility-scripts-image@sha256:53a3041dff341b7fd1765b9cc2c324625d19e804b2eaff10a6e6d9dcdbde3a91 - workingDir: /var/workdir - script: | - echo "Merging contents of sbom-source.json and sbom-image.json into sbom-cyclonedx.json" - python3 /scripts/merge_syft_sboms.py - - if [ -f "sbom-cachi2.json" ]; then - echo "Merging contents of sbom-cachi2.json into sbom-cyclonedx.json" - python3 /scripts/merge_cachi2_sboms.py sbom-cachi2.json sbom-cyclonedx.json >sbom-temp.json - mv sbom-temp.json sbom-cyclonedx.json - fi - - echo "Creating sbom-purl.json" - python3 /scripts/create_purl_sbom.py - - echo "Adding base images data to sbom-cyclonedx.json" - python3 /scripts/base_images_sbom_script.py \ - --sbom=sbom-cyclonedx.json \ - --base-images-from-dockerfile=/shared/base_images_from_dockerfile \ - --base-images-digests=/shared/base_images_digests - computeResources: - limits: - cpu: 100m - memory: 256Mi - requests: - cpu: 10m - memory: 128Mi - securityContext: - runAsUser: 0 - - name: inject-sbom-and-push - image: quay.io/konflux-ci/buildah-task:latest@sha256:5cbd487022fb7ac476cbfdea25513b810f7e343ec48f89dc6a4e8c3c39fa37a2 - workingDir: /var/workdir - volumeMounts: - - mountPath: /var/lib/containers - name: varlibcontainers - - mountPath: /mnt/trusted-ca - name: trusted-ca - readOnly: true - script: | - #!/bin/bash - set -e - - ca_bundle=/mnt/trusted-ca/ca-bundle.crt - if [ -f "$ca_bundle" ]; then - echo "INFO: Using mounted CA bundle: $ca_bundle" - cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors - update-ca-trust - fi - - base_image_name=$(buildah inspect --format '{{ index .ImageAnnotations "org.opencontainers.image.base.name"}}' $IMAGE | cut -f1 -d'@') - base_image_digest=$(buildah inspect --format '{{ index .ImageAnnotations "org.opencontainers.image.base.digest"}}' $IMAGE) - container=$(buildah from --pull-never $IMAGE) - buildah copy $container sbom-cyclonedx.json sbom-purl.json /root/buildinfo/content_manifests/ - buildah config -a org.opencontainers.image.base.name=${base_image_name} -a org.opencontainers.image.base.digest=${base_image_digest} $container - - BUILDAH_ARGS=() - if [ "${SQUASH}" == "true" ]; then - BUILDAH_ARGS+=("--squash") - fi - - buildah commit "${BUILDAH_ARGS[@]}" $container $IMAGE - - status=-1 - max_run=5 - sleep_sec=10 - for run in $(seq 1 $max_run); do - status=0 - [ "$run" -gt 1 ] && sleep $sleep_sec - echo "Pushing sbom image to registry" - buildah push \ - --tls-verify=$TLSVERIFY \ - --digestfile /var/workdir/image-digest $IMAGE \ - docker://$IMAGE && break || status=$? 
- done - if [ "$status" -ne 0 ]; then - echo "Failed to push sbom image to registry after ${max_run} tries" - exit 1 - fi - - cat "/var/workdir"/image-digest | tee $(results.IMAGE_DIGEST.path) - echo -n "$IMAGE" | tee $(results.IMAGE_URL.path) - { - echo -n "${IMAGE}@" - cat "/var/workdir/image-digest" - } >"$(results.IMAGE_REF.path)" - - # Remove tag from IMAGE while allowing registry to contain a port number. - sbom_repo="${IMAGE%:*}" - sbom_digest="$(sha256sum sbom-cyclonedx.json | cut -d' ' -f1)" - # The SBOM_BLOB_URL is created by `cosign attach sbom`. - echo -n "${sbom_repo}@sha256:${sbom_digest}" | tee "$(results.SBOM_BLOB_URL.path)" - computeResources: - limits: - cpu: "2" - memory: 2Gi - requests: - cpu: "100m" - memory: 512Mi - securityContext: - capabilities: - add: - - SETFCAP - runAsUser: 0 - - name: upload-sbom - image: quay.io/konflux-ci/appstudio-utils:ab6b0b8e40e440158e7288c73aff1cf83a2cc8a9@sha256:24179f0efd06c65d16868c2d7eb82573cce8e43533de6cea14fec3b7446e0b14 - workingDir: /var/workdir - volumeMounts: - - mountPath: /mnt/trusted-ca - name: trusted-ca - readOnly: true - script: | - ca_bundle=/mnt/trusted-ca/ca-bundle.crt - if [ -f "$ca_bundle" ]; then - echo "INFO: Using mounted CA bundle: $ca_bundle" - cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors - update-ca-trust - fi - - cosign attach sbom --sbom sbom-cyclonedx.json --type cyclonedx "$(cat "$(results.IMAGE_REF.path)")" - computeResources: - limits: - cpu: 100m - memory: 256Mi - requests: - cpu: 10m - memory: 128Mi From b750036592423413ad891f4b98dad3dd27b5245f Mon Sep 17 00:00:00 2001 From: Nick Cross Date: Wed, 9 Oct 2024 09:01:17 +0100 Subject: [PATCH 5/8] Only apply PodTemplate and ComputeResources for CI Tests --- openshift-with-appstudio-test/e2e/util.go | 7 +- .../dependencybuild/dependencybuild.go | 84 +++++++++---------- pkg/reconciler/jbsconfig/jbsconfig.go | 1 + 3 files changed, 46 insertions(+), 46 deletions(-) diff --git a/openshift-with-appstudio-test/e2e/util.go b/openshift-with-appstudio-test/e2e/util.go index 49de3b1f5..f279d4c4c 100644 --- a/openshift-with-appstudio-test/e2e/util.go +++ b/openshift-with-appstudio-test/e2e/util.go @@ -1064,9 +1064,10 @@ func setupMinikube(t *testing.T, namespace string) *testArgs { } jbsConfig := v1alpha1.JBSConfig{ ObjectMeta: metav1.ObjectMeta{ - Namespace: ta.ns, - Name: v1alpha1.JBSConfigName, - Annotations: map[string]string{jbsconfig.TestRegistry: strconv.FormatBool(insecure)}, + Namespace: ta.ns, + Name: v1alpha1.JBSConfigName, + Annotations: map[string]string{jbsconfig.TestRegistry: strconv.FormatBool(insecure), + jbsconfig.CITests: "true"}, }, Spec: v1alpha1.JBSConfigSpec{ EnableRebuilds: true, diff --git a/pkg/reconciler/dependencybuild/dependencybuild.go b/pkg/reconciler/dependencybuild/dependencybuild.go index 17a0451ce..1c561835d 100644 --- a/pkg/reconciler/dependencybuild/dependencybuild.go +++ b/pkg/reconciler/dependencybuild/dependencybuild.go @@ -636,29 +636,30 @@ func (r *ReconcileDependencyBuild) handleStateBuilding(ctx context.Context, db * }, }}, } - // TODO: ### Enclose this within an annotation to denote test CI system in use? 
- pr.Spec.TaskRunTemplate = tektonpipeline.PipelineTaskRunTemplate{ - PodTemplate: &pod.Template{ - Env: []v1.EnvVar{ - { - Name: "ORAS_OPTIONS", - Value: orasOptions, + if orasOptions != "" { + pr.Spec.TaskRunTemplate = tektonpipeline.PipelineTaskRunTemplate{ + PodTemplate: &pod.Template{ + Env: []v1.EnvVar{ + { + Name: "ORAS_OPTIONS", + Value: orasOptions, + }, }, }, - }, + } + } + if jbsConfig.Annotations != nil && jbsConfig.Annotations[jbsconfig.CITests] == "true" { + podMemR, _ := resource.ParseQuantity("1792Mi") + podMemL, _ := resource.ParseQuantity("3584Mi") + podCPU, _ := resource.ParseQuantity("500m") + pr.Spec.TaskRunSpecs = []tektonpipeline.PipelineTaskRunSpec{{ + PipelineTaskName: BuildTaskName, + ComputeResources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{"memory": podMemR, "cpu": podCPU}, + Limits: v1.ResourceList{"memory": podMemL, "cpu": podCPU}, + }, + }} } - // TODO: ### Enclose this within an annotation to denote test CI system in use? - podMemR, _ := resource.ParseQuantity("1792Mi") - podMemL, _ := resource.ParseQuantity("3584Mi") - podCPU, _ := resource.ParseQuantity("500m") - pr.Spec.TaskRunSpecs = []tektonpipeline.PipelineTaskRunSpec{{ - PipelineTaskName: BuildTaskName, - ComputeResources: &v1.ResourceRequirements{ - Requests: v1.ResourceList{"memory": podMemR, "cpu": podCPU}, - Limits: v1.ResourceList{"memory": podMemL, "cpu": podCPU}, - }, - }} - if !jbsConfig.Spec.CacheSettings.DisableTLS { pr.Spec.Workspaces = append(pr.Spec.Workspaces, tektonpipeline.WorkspaceBinding{Name: "tls", ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: v1alpha1.TlsConfigMapName}}}) } else { @@ -1422,10 +1423,6 @@ func (r *ReconcileDependencyBuild) handleStateDeploying(ctx context.Context, db {Name: PipelineResultPreBuildImageDigest, Value: tektonpipeline.ResultValue{Type: tektonpipeline.ParamTypeString, StringVal: db.Status.PreBuildImages[len(db.Status.PreBuildImages)-1].BuiltImageDigest}}, } - orasOptions := "" - if jbsConfig.Annotations != nil && jbsConfig.Annotations[jbsconfig.TestRegistry] == "true" { - orasOptions = "--insecure --plain-http" - } systemConfig := v1alpha1.SystemConfig{} err = r.client.Get(ctx, types.NamespacedName{Name: systemconfig.SystemConfigKey}, &systemConfig) if err != nil { @@ -1450,28 +1447,29 @@ func (r *ReconcileDependencyBuild) handleStateDeploying(ctx context.Context, db pr.Spec.Workspaces = append(pr.Spec.Workspaces, tektonpipeline.WorkspaceBinding{Name: "tls", EmptyDir: &v1.EmptyDirVolumeSource{}}) } pr.Spec.Timeouts = &tektonpipeline.TimeoutFields{Pipeline: &v12.Duration{Duration: time.Hour * v1alpha1.DefaultTimeout}} - // TODO: ### Enclose this within an annotation to denote test CI system in use? Could inline orasOptions then as well? 
- pr.Spec.TaskRunTemplate = tektonpipeline.PipelineTaskRunTemplate{ - PodTemplate: &pod.Template{ - Env: []v1.EnvVar{ - { - Name: "ORAS_OPTIONS", - Value: orasOptions, + if jbsConfig.Annotations != nil && jbsConfig.Annotations[jbsconfig.TestRegistry] == "true" { + pr.Spec.TaskRunTemplate = tektonpipeline.PipelineTaskRunTemplate{ + PodTemplate: &pod.Template{ + Env: []v1.EnvVar{ + { + Name: "ORAS_OPTIONS", + Value: "--insecure --plain-http", + }, }, }, - }, + } + } + if jbsConfig.Annotations != nil && jbsConfig.Annotations[jbsconfig.CITests] == "true" { + podMem, _ := resource.ParseQuantity("1024Mi") + podCPU, _ := resource.ParseQuantity("250m") + pr.Spec.TaskRunSpecs = []tektonpipeline.PipelineTaskRunSpec{{ + PipelineTaskName: DeployTaskName, + ComputeResources: &v1.ResourceRequirements{ + Requests: v1.ResourceList{"memory": podMem, "cpu": podCPU}, + Limits: v1.ResourceList{"memory": podMem, "cpu": podCPU}, + }, + }} } - // TODO: ### Enclose this within an annotation to denote test CI system in use? - podMem, _ := resource.ParseQuantity("1024Mi") - podCPU, _ := resource.ParseQuantity("250m") - pr.Spec.TaskRunSpecs = []tektonpipeline.PipelineTaskRunSpec{{ - PipelineTaskName: DeployTaskName, - ComputeResources: &v1.ResourceRequirements{ - Requests: v1.ResourceList{"memory": podMem, "cpu": podCPU}, - Limits: v1.ResourceList{"memory": podMem, "cpu": podCPU}, - }, - }} - if err := controllerutil.SetOwnerReference(db, &pr, r.scheme); err != nil { return reconcile.Result{}, err } diff --git a/pkg/reconciler/jbsconfig/jbsconfig.go b/pkg/reconciler/jbsconfig/jbsconfig.go index e8aec8009..d830725fb 100644 --- a/pkg/reconciler/jbsconfig/jbsconfig.go +++ b/pkg/reconciler/jbsconfig/jbsconfig.go @@ -35,6 +35,7 @@ import ( const ( TlsServiceName = v1alpha1.CacheDeploymentName + "-tls" + CITests = "jvmbuildservice.io/ci-tests" // Denote running CI Tests TestRegistry = "jvmbuildservice.io/test-registry" // Denote using an insecure registry in GitHub Actions RetryTimeAnnotations = "jvmbuildservice.io/retry-time" RetryTimestampAnnotations = "jvmbuildservice.io/retry-timestamp" From bac0bca672e61da0e45967e7453e4fa9dd8c0293 Mon Sep 17 00:00:00 2001 From: Nick Cross Date: Wed, 9 Oct 2024 09:39:45 +0100 Subject: [PATCH 6/8] Use kubectl rather than oc --- deploy/minikube-development.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/deploy/minikube-development.sh b/deploy/minikube-development.sh index a4d0da0d7..eee2140ec 100755 --- a/deploy/minikube-development.sh +++ b/deploy/minikube-development.sh @@ -6,13 +6,13 @@ timeout=600 #10 minutes in seconds endTime=$(( $(date +%s) + timeout )) echo -e "\033[0;32mWaiting for Tekton Pipeines to start...\033[0m" -while ! oc get pods -n tekton-pipelines | grep tekton-pipelines-controller | grep "1/1"; do +while ! kubectl get pods -n tekton-pipelines | grep tekton-pipelines-controller | grep "1/1"; do sleep 1 if [ $(date +%s) -gt $endTime ]; then exit 1 fi done -while ! oc get pods -n tekton-pipelines | grep tekton-pipelines-webhook | grep "1/1"; do +while ! kubectl get pods -n tekton-pipelines | grep tekton-pipelines-webhook | grep "1/1"; do sleep 1 if [ $(date +%s) -gt $endTime ]; then exit 1 From 7629329ca02ccc324327b1e5d1b938806a8c10e3 Mon Sep 17 00:00:00 2001 From: Nick Cross Date: Wed, 9 Oct 2024 11:44:39 +0100 Subject: [PATCH 7/8] Add missing annotation. Add logging. 
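
Both e2e setup paths (setupE2E and setupMinikube) now create the JBSConfig with the jvmbuildservice.io/ci-tests annotation, so the controller's CI-only ComputeResources path is exercised by the tests. The resulting metadata looks roughly like this (a sketch; the object name comes from v1alpha1.JBSConfigName and is shown as a placeholder, and jvmbuildservice.io/test-registry is only "true" for insecure minikube registries):

    metadata:
      name: <v1alpha1.JBSConfigName>
      namespace: <test namespace>
      annotations:
        jvmbuildservice.io/ci-tests: "true"
        jvmbuildservice.io/test-registry: "true"    # minikube insecure-registry runs only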
--- .../e2e/jvm_build_service_test.go | 2 +- .../e2e/periodic_test.go | 2 +- openshift-with-appstudio-test/e2e/util.go | 16 +++++++--------- .../dependencybuild/dependencybuild.go | 2 ++ 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/openshift-with-appstudio-test/e2e/jvm_build_service_test.go b/openshift-with-appstudio-test/e2e/jvm_build_service_test.go index 73ca53119..c5695b8b4 100644 --- a/openshift-with-appstudio-test/e2e/jvm_build_service_test.go +++ b/openshift-with-appstudio-test/e2e/jvm_build_service_test.go @@ -8,5 +8,5 @@ import ( ) func TestExampleRun(t *testing.T) { - runBasicTests(t, setup, testNamespace) + runBasicTests(t, setupE2E, testNamespace) } diff --git a/openshift-with-appstudio-test/e2e/periodic_test.go b/openshift-with-appstudio-test/e2e/periodic_test.go index e1a6be7f4..d954320ce 100644 --- a/openshift-with-appstudio-test/e2e/periodic_test.go +++ b/openshift-with-appstudio-test/e2e/periodic_test.go @@ -20,7 +20,7 @@ import ( ) func runTests(t *testing.T, namespace string, runYaml string) { - ta := setup(t, namespace) + ta := setupE2E(t, namespace) countQuota := &corev1.ResourceQuota{ ObjectMeta: metav1.ObjectMeta{ diff --git a/openshift-with-appstudio-test/e2e/util.go b/openshift-with-appstudio-test/e2e/util.go index f279d4c4c..76a5437b4 100644 --- a/openshift-with-appstudio-test/e2e/util.go +++ b/openshift-with-appstudio-test/e2e/util.go @@ -243,11 +243,7 @@ func commonSetup(t *testing.T, gitCloneUrl string, namespace string) *testArgs { } return ta } -func setup(t *testing.T, namespace string) *testArgs { - return setupConfig(t, namespace) -} -func setupConfig(t *testing.T, namespace string) *testArgs { - +func setupE2E(t *testing.T, namespace string) *testArgs { ta := commonSetup(t, gitCloneTaskUrl, namespace) err := wait.PollUntilContextTimeout(context.TODO(), 1*time.Second, 1*time.Minute, true, func(ctx context.Context) (done bool, err error) { _, err = kubeClient.CoreV1().ServiceAccounts(ta.ns).Get(context.TODO(), "pipeline", metav1.GetOptions{}) @@ -292,8 +288,9 @@ func setupConfig(t *testing.T, namespace string) *testArgs { jbsConfig := v1alpha1.JBSConfig{ ObjectMeta: metav1.ObjectMeta{ - Namespace: ta.ns, - Name: v1alpha1.JBSConfigName, + Namespace: ta.ns, + Name: v1alpha1.JBSConfigName, + Annotations: map[string]string{jbsconfig.CITests: "true"}, }, Spec: v1alpha1.JBSConfigSpec{ EnableRebuilds: true, @@ -1066,8 +1063,9 @@ func setupMinikube(t *testing.T, namespace string) *testArgs { ObjectMeta: metav1.ObjectMeta{ Namespace: ta.ns, Name: v1alpha1.JBSConfigName, - Annotations: map[string]string{jbsconfig.TestRegistry: strconv.FormatBool(insecure), - jbsconfig.CITests: "true"}, + Annotations: map[string]string{ + jbsconfig.TestRegistry: strconv.FormatBool(insecure), + jbsconfig.CITests: "true"}, }, Spec: v1alpha1.JBSConfigSpec{ EnableRebuilds: true, diff --git a/pkg/reconciler/dependencybuild/dependencybuild.go b/pkg/reconciler/dependencybuild/dependencybuild.go index 1c561835d..bda79a8d5 100644 --- a/pkg/reconciler/dependencybuild/dependencybuild.go +++ b/pkg/reconciler/dependencybuild/dependencybuild.go @@ -649,6 +649,7 @@ func (r *ReconcileDependencyBuild) handleStateBuilding(ctx context.Context, db * } } if jbsConfig.Annotations != nil && jbsConfig.Annotations[jbsconfig.CITests] == "true" { + log.Info(fmt.Sprintf("Configuring resources for %#v", BuildTaskName)) podMemR, _ := resource.ParseQuantity("1792Mi") podMemL, _ := resource.ParseQuantity("3584Mi") podCPU, _ := resource.ParseQuantity("500m") @@ -1460,6 +1461,7 @@ func (r 
*ReconcileDependencyBuild) handleStateDeploying(ctx context.Context, db } } if jbsConfig.Annotations != nil && jbsConfig.Annotations[jbsconfig.CITests] == "true" { + log.Info(fmt.Sprintf("Configuring resources for %#v", DeployTaskName)) podMem, _ := resource.ParseQuantity("1024Mi") podCPU, _ := resource.ParseQuantity("250m") pr.Spec.TaskRunSpecs = []tektonpipeline.PipelineTaskRunSpec{{ From 1dc70f32081435f24be8d2baa48d46b414721ace Mon Sep 17 00:00:00 2001 From: Nick Cross Date: Wed, 9 Oct 2024 12:34:55 +0100 Subject: [PATCH 8/8] Use Ubuntu 22.04 rather than latest which is now 24 --- .github/workflows/minikube.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/minikube.yaml b/.github/workflows/minikube.yaml index e0b7be116..7bf63dcf8 100644 --- a/.github/workflows/minikube.yaml +++ b/.github/workflows/minikube.yaml @@ -4,7 +4,7 @@ on: branches: [ main ] jobs: wait-for-images: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Wait for Images run: | @@ -36,7 +36,7 @@ jobs: - "build-systems" - "commons" needs: [wait-for-images] - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 name: Minikube Artifact Build Matrix steps: - name: Free Disk Space (Ubuntu) @@ -99,7 +99,7 @@ jobs: dbtestsets: - "jakartaee" needs: [wait-for-images] - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 name: Minikube Dependency Build Matrix steps: - name: Free Disk Space (Ubuntu)
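
Taken together, the per-task computeResources overrides introduced in this series rely on Tekton's non-stable API surface, which is why both deployment flavours enable it up front: the plain install by patching the feature-flags ConfigMap and the operator-managed install by patching the TektonConfig. After the minikube script runs, the ConfigMap should look approximately like this (a sketch for orientation; kubectl patch performs a merge, so any other keys already present under data are preserved):

    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: feature-flags
      namespace: tekton-pipelines
    data:
      enable-api-fields: alpha
      # ...remaining Tekton feature flags unchanged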