diff --git a/CHANGELOG/release-notes-v1.8.0.md b/CHANGELOG/release-notes-v1.8.0.md new file mode 100644 index 0000000000..1a6ec8f889 --- /dev/null +++ b/CHANGELOG/release-notes-v1.8.0.md @@ -0,0 +1,22 @@ +## v1.8.0 + +## Enhancements +- feat: added the plugin for AWS ECR retagging (#6695) +- feat: Flux CD deployment (#6660) +- feat: add app name in labels list API (#6688) +- feat: audit CI, pre-CD, and post-CD triggers so that retrigger can happen from the last failed config snapshot (#6659) +## Bugs +- fix: Argo sync (#6718) +- fix: cluster delete (#6706) +- fix: Notifier v1 removed (#6705) +- fix: app clone panic (#6696) +- fix: SPDY migration to WebSocket (#6682) +- fix: scanning optimisation (#6683) +- fix: panic in logs API (#6684) +- fix: empty migration seq (#6673) +## Others +- chore: added SQL file for 4.21 (#6716) +- misc: added support for service extraSpec (#6702) +- chore: when the output dir path is /devtroncd in any pipeline stage step, the CI runner gets stuck in a recursive self-copy loop (#6686) + + diff --git a/charts/devtron/Chart.yaml b/charts/devtron/Chart.yaml index adc92a13d4..7061008f7e 100644 --- a/charts/devtron/Chart.yaml +++ b/charts/devtron/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: devtron-operator -appVersion: 1.7.0 +appVersion: 1.8.0 description: Chart to configure and install Devtron. Devtron is a Kubernetes Orchestration system. keywords: - Devtron @@ -11,12 +11,12 @@ keywords: - argocd - Hyperion engine: gotpl -version: 0.22.95 +version: 0.22.96 sources: - https://github.com/devtron-labs/charts dependencies: - name: argo-cd - version: "5.9.1" + version: "7.7.15" repository: https://argoproj.github.io/argo-helm condition: argo-cd.enabled - name: security diff --git a/charts/devtron/devtron-bom.yaml b/charts/devtron/devtron-bom.yaml index 1f1f025a7d..7694928251 100644 --- a/charts/devtron/devtron-bom.yaml +++ b/charts/devtron/devtron-bom.yaml @@ -15,7 +15,7 @@ global: PG_DATABASE: orchestrator extraManifests: [] installer: - release: "v1.7.0" + release: "v1.8.0" registry: "" image: "inception" tag: "473deaa4-185-21582" @@ -40,13 +40,13 @@ components: ENABLE_RESOURCE_SCAN: "true" FEATURE_CODE_MIRROR_ENABLE: "true" registry: "" - image: "dashboard:a85f2624-690-33873" + image: "dashboard:3646fa5d-690-34658" imagePullPolicy: IfNotPresent healthPort: 8080 devtron: registry: "" - image: "hyperion:c8e75fb3-280-33879" - cicdImage: "devtron:c8e75fb3-434-33854" + image: "hyperion:22cac3b8-280-34537" + cicdImage: "devtron:22cac3b8-434-34538" imagePullPolicy: IfNotPresent customOverrides: {} podSecurityContext: @@ -60,7 +60,7 @@ components: healthPort: 8080 ciRunner: registry: "" - image: "ci-runner:a4fc9044-138-33875" + image: "ci-runner:f21e02cb-138-34532" argocdDexServer: registry: "" image: "dex:v2.30.2" @@ -69,7 +69,7 @@ components: authenticator: "authenticator:e414faff-393-13273" kubelink: registry: "" - image: "kubelink:a4fc9044-564-33855" + image: "kubelink:f21e02cb-564-34528" imagePullPolicy: IfNotPresent configs: ENABLE_HELM_RELEASE_CACHE: "true" @@ -92,7 +92,7 @@ components: healthPort: 50052 kubewatch: registry: "" - image: "kubewatch:a4fc9044-419-33852" + image: "kubewatch:f21e02cb-419-34527" imagePullPolicy: IfNotPresent healthPort: 8080 configs: @@ -116,7 +116,7 @@ components: image: postgres_exporter:v0.10.1 gitsensor: registry: "" - image: "git-sensor:a4fc9044-200-33872" + image: "git-sensor:f21e02cb-200-34529" imagePullPolicy: IfNotPresent serviceMonitor: enabled: false @@ -134,7 +134,7 @@ components: # Values for lens lens: registry: "" -
image: "lens:a4fc9044-333-33874" + image: "lens:f21e02cb-333-34531" imagePullPolicy: IfNotPresent configs: GIT_SENSOR_PROTOCOL: GRPC @@ -169,7 +169,7 @@ components: entMigratorImage: "devtron-utils:geni-v1.1.4" chartSync: registry: "" - image: chart-sync:a4fc9044-836-33878 + image: chart-sync:f21e02cb-836-34536 schedule: "0 19 * * *" podSecurityContext: fsGroup: 1001 @@ -186,9 +186,18 @@ argo-cd: # -- If defined, a repository applied to all Argo CD deployments repository: quay.io/argoproj/argocd # -- Overrides the global Argo CD image tag whose default is the chart appVersion - tag: "v2.5.2" + tag: "v2.13.3" # -- If defined, a imagePullPolicy applied to all Argo CD deployments imagePullPolicy: IfNotPresent + configs: + cm: + create: false + # argocd-rbac-cm + rbac: + create: true + policy.default: role:admin + applicationSet: + replicas: 0 # Change below values for workflow controller workflowController: registry: "quay.io/argoproj" @@ -198,7 +207,7 @@ workflowController: IMDSv1ExecutorImage: "argoexec:v3.0.7" security: imageScanner: - image: "image-scanner:a4fc9044-141-33877" + image: "image-scanner:f21e02cb-141-34534" healthPort: 8080 configs: TRIVY_DB_REPOSITORY: mirror.gcr.io/aquasec/trivy-db @@ -209,7 +218,7 @@ security: tag: 4.3.6 # Values for notifier integration notifier: - image: "notifier:19d654ff-372-33876" + image: "notifier:fb96112e-372-34533" healthPort: 3000 minio: image: "minio:RELEASE.2021-02-14T04-01-33Z" diff --git a/charts/devtron/templates/argocd-secret.yaml b/charts/devtron/templates/argocd-secret.yaml index 1d55e545bc..ee12df5593 100644 --- a/charts/devtron/templates/argocd-secret.yaml +++ b/charts/devtron/templates/argocd-secret.yaml @@ -64,173 +64,205 @@ data: health.lua: | hs = {} if obj.status ~= nil then - if obj.status.status ~= nil then - hs.status = "Degraded" - hs.message = obj.status.status - else + if obj.status.status ~= nil then + hs.status = "Degraded" + hs.message = obj.status.status + else hs.status = "Healthy" - end + end else - hs.status = "Healthy" + hs.status = "Healthy" end return hs argoproj.io/Rollout: health.lua: | + function getNumberValueOrDefault(field) + if field ~= nil then + return field + end + return 0 + end + function checkPaused(obj) + hs = {} + local paused = false + if obj.status.verifyingPreview ~= nil then + paused = obj.status.verifyingPreview + elseif obj.spec.paused ~= nil then + paused = obj.spec.paused + end + + if paused then + hs.status = "Suspended" + hs.message = "Rollout is paused" + return hs + end + return nil + end function checkReplicasStatus(obj) - hs = {} - replicasCount = getNumberValueOrDefault(obj.spec.replicas) - replicasStatus = getNumberValueOrDefault(obj.status.replicas) - updatedReplicas = getNumberValueOrDefault(obj.status.updatedReplicas) - availableReplicas = getNumberValueOrDefault(obj.status.availableReplicas) + hs = {} + replicasCount = getNumberValueOrDefault(obj.spec.replicas) + replicasStatus = getNumberValueOrDefault(obj.status.replicas) + updatedReplicas = getNumberValueOrDefault(obj.status.updatedReplicas) + availableReplicas = getNumberValueOrDefault(obj.status.availableReplicas) - if updatedReplicas < replicasCount then + if updatedReplicas < replicasCount then hs.status = "Progressing" hs.message = "Waiting for roll out to finish: More replicas need to be updated" return hs - end - -- Since the scale down delay can be very high, BlueGreen does not wait for all the old replicas to scale - -- down before marking itself healthy. 
As a result, only evaluate this condition if the strategy is canary. - if obj.spec.strategy.canary ~= nil and replicasStatus > updatedReplicas then + end + -- Since the scale down delay can be very high, BlueGreen does not wait for all the old replicas to scale + -- down before marking itself healthy. As a result, only evaluate this condition if the strategy is canary. + if obj.spec.strategy.canary ~= nil and replicasStatus > updatedReplicas then hs.status = "Progressing" hs.message = "Waiting for roll out to finish: old replicas are pending termination" return hs - end - if availableReplicas < updatedReplicas then + end + if availableReplicas < updatedReplicas then hs.status = "Progressing" hs.message = "Waiting for roll out to finish: updated replicas are still becoming available" return hs - end - return nil - end - - function getNumberValueOrDefault(field) - if field ~= nil then - return field - end - return 0 - end - - function checkPaused(obj) - hs = {} - local paused = false - if obj.status.verifyingPreview ~= nil then - paused = obj.status.verifyingPreview - elseif obj.spec.paused ~= nil then - paused = obj.spec.paused - end - - if paused then - hs.status = "Suspended" - hs.message = "Rollout is paused" - return hs - end - return nil - end + end + return nil + end - hs = {} - if obj.status ~= nil then - if obj.status.conditions ~= nil then + function statusfromcondition(obj) + local hs={} for _, condition in ipairs(obj.status.conditions) do - if condition.type == "InvalidSpec" then - hs.status = "Degraded" - hs.message = condition.message + if condition.type == "InvalidSpec" then + hs.status = "Degraded" + hs.message = condition.message return hs - end - if condition.type == "Progressing" and condition.reason == "RolloutAborted" then - hs.status = "Degraded" - hs.message = condition.message + end + if condition.type == "Progressing" and condition.reason == "RolloutAborted" then + hs.status = "Degraded" + hs.message = condition.message return hs - end - if condition.type == "Progressing" and condition.reason == "ProgressDeadlineExceeded" then - hs.status = "Degraded" - hs.message = condition.message + end + if condition.type == "Progressing" and condition.reason == "ProgressDeadlineExceeded" then + hs.status = "Degraded" + hs.message = condition.message return hs - end + end end - end - if obj.status.currentPodHash ~= nil then + return nil + end + + function statusfrompodhash(obj) + local hs={} if obj.spec.strategy.blueGreen ~= nil then - isPaused = checkPaused(obj) - if isPaused ~= nil then - return isPaused - end - replicasHS = checkReplicasStatus(obj) - if replicasHS ~= nil then - return replicasHS - end - if obj.status.blueGreen ~= nil and obj.status.blueGreen.activeSelector ~= nil and obj.status.blueGreen.activeSelector == obj.status.currentPodHash then - hs.status = "Healthy" - hs.message = "The active Service is serving traffic to the current pod spec" + isPaused = checkPaused(obj) + if isPaused ~= nil then + return isPaused + end + replicasHS = checkReplicasStatus(obj) + if replicasHS ~= nil then + return replicasHS + end + if obj.status.blueGreen ~= nil and obj.status.blueGreen.activeSelector ~= nil and obj.status.blueGreen.activeSelector == obj.status.currentPodHash then + hs.status = "Healthy" + hs.message = "The active Service is serving traffic to the current pod spec" + return hs + end + hs.status = "Progressing" + hs.message = "The current pod spec is not receiving traffic from the active service" return hs - end - hs.status = "Progressing" - hs.message = "The current 
pod spec is not receiving traffic from the active service" - return hs end if obj.spec.strategy.recreate ~= nil then - isPaused = checkPaused(obj) - if isPaused ~= nil then - return isPaused - end - replicasHS = checkReplicasStatus(obj) - if replicasHS ~= nil then - return replicasHS - end - if obj.status.recreate ~= nil and obj.status.recreate.currentRS ~= nil and obj.status.recreate.currentRS == obj.status.currentPodHash then - hs.status = "Healthy" - hs.message = "Rollout is successful" + isPaused = checkPaused(obj) + if isPaused ~= nil then + return isPaused + end + replicasHS = checkReplicasStatus(obj) + if replicasHS ~= nil then + return replicasHS + end + if obj.status.recreate ~= nil and obj.status.recreate.currentRS ~= nil and obj.status.recreate.currentRS == obj.status.currentPodHash then + hs.status = "Healthy" + hs.message = "Rollout is successful" + return hs + end + hs.status = "Progressing" + hs.message = "Rollout is in progress" return hs - end - hs.status = "Progressing" - hs.message = "Rollout is in progress" - return hs end if obj.spec.strategy.canary ~= nil then - currentRSIsStable = obj.status.canary.stableRS == obj.status.currentPodHash - if obj.spec.strategy.canary.steps ~= nil and table.getn(obj.spec.strategy.canary.steps) > 0 then + if obj.status.stableRS ~= nil then + currentRSIsStable = obj.status.stableRS == obj.status.currentPodHash + end + if obj.status.canary.stableRS ~= nil then + currentRSIsStable = obj.status.canary.stableRS == obj.status.currentPodHash + end + if obj.spec.strategy.canary.steps ~= nil and table.getn(obj.spec.strategy.canary.steps) > 0 then stepCount = table.getn(obj.spec.strategy.canary.steps) if obj.status.currentStepIndex ~= nil then - currentStepIndex = obj.status.currentStepIndex - isPaused = checkPaused(obj) - if isPaused ~= nil then + currentStepIndex = obj.status.currentStepIndex + isPaused = checkPaused(obj) + if isPaused ~= nil then return isPaused - end - - if paused then + end + + if paused then hs.status = "Suspended" hs.message = "Rollout is paused" return hs - end - if currentRSIsStable and stepCount == currentStepIndex then + end + if currentRSIsStable and stepCount == currentStepIndex then replicasHS = checkReplicasStatus(obj) if replicasHS ~= nil then - return replicasHS + return replicasHS end hs.status = "Healthy" hs.message = "The rollout has completed all steps" return hs - end + end end hs.status = "Progressing" hs.message = "Waiting for rollout to finish steps" return hs - end + end - -- The detecting the health of the Canary deployment when there are no steps - replicasHS = checkReplicasStatus(obj) - if replicasHS ~= nil then + -- The detecting the health of the Canary deployment when there are no steps + replicasHS = checkReplicasStatus(obj) + if replicasHS ~= nil then return replicasHS - end - if currentRSIsStable then + end + if currentRSIsStable then hs.status = "Healthy" hs.message = "The rollout has completed canary deployment" return hs - end - hs.status = "Progressing" - hs.message = "Waiting for rollout to finish canary deployment" + end + hs.status = "Progressing" + hs.message = "Waiting for rollout to finish canary deployment" end - end - end - hs.status = "Progressing" - hs.message = "Waiting for rollout to finish: status has not been reconciled." 
+ + + return hs + end + + -- Main Code + hs = {} + if obj.status.phase ~= nil then + if obj.status.phase == "Paused" then + hs.status = "Progressing" + hs.message = "Rollout is paused" + elseif obj.status.phase == "Progressing" then + hs=statusfromcondition(obj) or hs + hs=statusfrompodhash(obj) or hs + elseif obj.status.phase == "Healthy" then + hs=statusfromcondition(obj) or hs + hs=statusfrompodhash(obj) or hs + else + hs.status = obj.status.phase + hs.message = obj.status.message + end + else + if obj.status ~= nil then + if obj.status.conditions ~= nil then + hs=statusfromcondition(obj) + end + if obj.status.currentPodHash ~= nil then + hs=statusfrompodhash(obj) + end + end + end return hs \ No newline at end of file diff --git a/charts/devtron/templates/devtron.yaml b/charts/devtron/templates/devtron.yaml index a27d44529b..14ab283a11 100644 --- a/charts/devtron/templates/devtron.yaml +++ b/charts/devtron/templates/devtron.yaml @@ -13,6 +13,7 @@ data: DEVTRON_HELM_RELEASE_NAME: {{ $.Release.Name }} DEVTRON_HELM_RELEASE_NAMESPACE: {{ $.Release.Namespace }} FEATURE_MIGRATE_ARGOCD_APPLICATION_ENABLE: "true" + GRPC_ENFORCE_ALPN_ENABLED: "false" {{ toYaml $.Values.global.dbConfig | indent 2 }} HELM_CLIENT_URL: kubelink-service-headless:50051 DASHBOARD_PORT: "80" diff --git a/charts/devtron/templates/migrator.yaml b/charts/devtron/templates/migrator.yaml index b08dd32d1a..c95d8b7212 100644 --- a/charts/devtron/templates/migrator.yaml +++ b/charts/devtron/templates/migrator.yaml @@ -443,4 +443,67 @@ spec: activeDeadlineSeconds: 1500 {{- end }} {{- end }} +{{- end }} +{{- if and $.Values.global.externalPostgres $.Values.global.externalPostgres.enabled }} +--- +{{- if $.Capabilities.APIVersions.Has "batch/v1/Job" }} +apiVersion: batch/v1 +{{- else }} +apiVersion: batch/v1beta1 +{{- end }} +kind: Job +metadata: + namespace: devtroncd + name: postgresql-create-databases-{{ randAlphaNum 5 | lower }} + annotations: + "helm.sh/hook": pre-install +spec: + activeDeadlineSeconds: 1500 + ttlSecondsAfterFinished: 1000 + backoffLimit: 20 + completions: 1 + parallelism: 1 + template: + metadata: + labels: + app: database-creator + spec: + {{- include "common.schedulerConfig" (dict "nodeSelector" $.Values.components.migrator.nodeSelector "tolerations" $.Values.components.migrator.tolerations "imagePullSecrets" $.Values.components.migrator.imagePullSecrets "global" $.Values.global) | indent 6 }} + serviceAccountName: devtron-default-sa + containers: + - command: + - /bin/sh + - -c + - | + # Create databases + export PGPASSWORD="${DB_PASSWORD}" + + echo "Creating database: orchestrator" + psql -h ${PG_ADDR} -p ${PG_PORT} -U ${PG_USER} -d postgres -c "CREATE DATABASE orchestrator;" || echo "Database orchestrator already exists or failed to create" + + echo "Creating database: git_sensor" + psql -h ${PG_ADDR} -p ${PG_PORT} -U ${PG_USER} -d postgres -c "CREATE DATABASE git_sensor;" || echo "Database git_sensor already exists or failed to create" + + echo "Creating database: lens" + psql -h ${PG_ADDR} -p ${PG_PORT} -U ${PG_USER} -d postgres -c "CREATE DATABASE lens;" || echo "Database lens already exists or failed to create" + + echo "Creating database: casbin" + psql -h ${PG_ADDR} -p ${PG_PORT} -U ${PG_USER} -d postgres -c "CREATE DATABASE casbin;" || echo "Database casbin already exists or failed to create" + + echo "Creating database: clairv4" + psql -h ${PG_ADDR} -p ${PG_PORT} -U ${PG_USER} -d postgres -c "CREATE DATABASE clairv4;" || echo "Database clairv4 already exists or failed to create" + + echo "All 
databases created successfully" + envFrom: + - secretRef: + name: postgresql-migrator + - configMapRef: + name: devtron-cm + - configMapRef: + name: devtron-custom-cm + - configMapRef: + name: devtron-common-cm + image: {{ include "common.image" (dict "component" $.Values.components.postgres "global" $.Values.global "extraImage" $.Values.components.postgres.image ) }} + name: postgresql-database-creator + restartPolicy: OnFailure {{- end }} \ No newline at end of file diff --git a/charts/devtron/templates/networkpolicies.yaml b/charts/devtron/templates/networkpolicies.yaml index 1a262d8c9c..0ba5939494 100644 --- a/charts/devtron/templates/networkpolicies.yaml +++ b/charts/devtron/templates/networkpolicies.yaml @@ -28,7 +28,7 @@ kind: NetworkPolicy metadata: labels: app: postgresql - name: netpol-devtron-postgress + name: netpol-devtron-postgres namespace: devtroncd spec: policyTypes: @@ -42,8 +42,8 @@ spec: - port: 5432 podSelector: matchLabels: - app: postgresql - release: devtron + app.kubernetes.io/name: postgres + app.kubernetes.io/instance: devtron --- apiVersion: networking.k8s.io/v1 kind: NetworkPolicy @@ -231,5 +231,19 @@ spec: matchLabels: app.kubernetes.io/name: nats app.kubernetes.io/instance: devtron-nats +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: allow-all-ingress-in-devtron + namespace: devtroncd +spec: + podSelector: + matchLabels: + app: devtron + policyTypes: + - Ingress + ingress: + - {} {{- end }} {{- end }} diff --git a/charts/devtron/values.yaml b/charts/devtron/values.yaml index 4a0055aa41..ac5eec5cb0 100644 --- a/charts/devtron/values.yaml +++ b/charts/devtron/values.yaml @@ -42,7 +42,7 @@ nfs: extraManifests: [] installer: repo: "devtron-labs/devtron" - release: "v1.7.0" + release: "v1.8.0" registry: "" image: inception tag: 473deaa4-185-21582 @@ -96,13 +96,13 @@ components: ENABLE_RESOURCE_SCAN: "true" FEATURE_CODE_MIRROR_ENABLE: "true" registry: "" - image: "dashboard:a85f2624-690-33873" + image: "dashboard:3646fa5d-690-34658" imagePullPolicy: IfNotPresent healthPort: 8080 devtron: registry: "" - image: "hyperion:c8e75fb3-280-33879" - cicdImage: "devtron:c8e75fb3-434-33854" + image: "hyperion:22cac3b8-280-34537" + cicdImage: "devtron:22cac3b8-434-34538" imagePullPolicy: IfNotPresent customOverrides: {} healthPort: 8080 @@ -139,7 +139,7 @@ components: # - devtron.example.com ciRunner: registry: "" - image: "ci-runner:a4fc9044-138-33875" + image: "ci-runner:f21e02cb-138-34532" # Add annotations for ci-runner & cd-runner serviceAccount. 
annotations: {} argocdDexServer: @@ -150,7 +150,7 @@ components: authenticator: "authenticator:e414faff-393-13273" kubelink: registry: "" - image: "kubelink:a4fc9044-564-33855" + image: "kubelink:f21e02cb-564-34528" imagePullPolicy: IfNotPresent healthPort: 50052 podSecurityContext: @@ -173,7 +173,7 @@ components: keyName: postgresql-password kubewatch: registry: "" - image: "kubewatch:a4fc9044-419-33852" + image: "kubewatch:f21e02cb-419-34527" imagePullPolicy: IfNotPresent healthPort: 8080 configs: @@ -199,7 +199,7 @@ components: volumeSize: "20Gi" gitsensor: registry: "" - image: "git-sensor:a4fc9044-200-33872" + image: "git-sensor:f21e02cb-200-34529" imagePullPolicy: IfNotPresent serviceMonitor: enabled: false @@ -217,7 +217,7 @@ components: # Values for lens lens: registry: "" - image: "lens:a4fc9044-333-33874" + image: "lens:f21e02cb-333-34531" imagePullPolicy: IfNotPresent secrets: {} resources: {} @@ -254,7 +254,7 @@ components: entMigratorImage: "devtron-utils:geni-v1.1.4" chartSync: registry: "" - image: chart-sync:a4fc9044-836-33878 + image: chart-sync:f21e02cb-836-34536 schedule: "0 19 * * *" extraConfigs: {} podSecurityContext: @@ -274,12 +274,18 @@ argo-cd: # -- If defined, a repository applied to all Argo CD deployments repository: quay.io/argoproj/argocd # -- Overrides the global Argo CD image tag whose default is the chart appVersion - tag: "v2.5.2" + tag: "v2.13.3" # -- If defined, a imagePullPolicy applied to all Argo CD deployments imagePullPolicy: IfNotPresent configs: secret: createSecret: false + cm: + create: false + # argocd-rbac-cm + rbac: + create: true + policy.default: role:admin # argocd-application-controller controller: args: @@ -342,7 +348,6 @@ argo-cd: tag: 7.0.5-alpine # argocd-server server: - configEnabled: false affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: @@ -365,9 +370,6 @@ argo-cd: - all readOnlyRootFilesystem: true runAsNonRoot: true - # argocd-rbac-cm - rbacConfig: - policy.default: role:admin # argocd-repo-server repoServer: affinity: @@ -401,14 +403,14 @@ argo-cd: readOnlyRootFilesystem: true runAsNonRoot: true applicationSet: - enabled: false + replicas: 0 notifications: enabled: false # Values for security integration security: enabled: false imageScanner: - image: "image-scanner:a4fc9044-141-33877" + image: "image-scanner:f21e02cb-141-34534" healthPort: 8080 configs: TRIVY_DB_REPOSITORY: mirror.gcr.io/aquasec/trivy-db @@ -427,7 +429,7 @@ security: notifier: enabled: false imagePullPolicy: IfNotPresent - image: "notifier:19d654ff-372-33876" + image: "notifier:fb96112e-372-34533" configs: CD_ENVIRONMENT: PROD secrets: {} diff --git a/devtron-images.txt.source b/devtron-images.txt.source index d7b4499a3e..34a7c1119e 100644 --- a/devtron-images.txt.source +++ b/devtron-images.txt.source @@ -1,5 +1,5 @@ public.ecr.aws/docker/library/redis:7.0.5-alpine -quay.io/argoproj/argocd:v2.5.2 +quay.io/argoproj/argocd:v2.13.3 quay.io/argoproj/workflow-controller:v3.4.3 quay.io/devtron/alpine-k8s-utils:latest quay.io/devtron/alpine-netshoot:latest @@ -7,26 +7,26 @@ quay.io/devtron/authenticator:e414faff-393-13273 quay.io/devtron/bats:v1.4.1 quay.io/devtron/busybox:1.31.1 quay.io/devtron/centos-k8s-utils:latest -quay.io/devtron/chart-sync:a4fc9044-836-33878 -quay.io/devtron/ci-runner:a4fc9044-138-33875 +quay.io/devtron/chart-sync:f21e02cb-836-34536 +quay.io/devtron/ci-runner:f21e02cb-138-34532 quay.io/devtron/clair:4.3.6 quay.io/devtron/curl:7.73.0 -quay.io/devtron/dashboard:a85f2624-690-33873 
+quay.io/devtron/dashboard:3646fa5d-690-34658 quay.io/devtron/devtron-utils:dup-chart-repo-v1.1.0 -quay.io/devtron/devtron:c8e75fb3-434-33854 +quay.io/devtron/devtron:22cac3b8-434-34538 quay.io/devtron/dex:v2.30.2 -quay.io/devtron/git-sensor:a4fc9044-200-33872 +quay.io/devtron/git-sensor:f21e02cb-200-34529 quay.io/devtron/grafana:7.3.1 -quay.io/devtron/hyperion:c8e75fb3-280-33879 -quay.io/devtron/image-scanner:a4fc9044-141-33877 +quay.io/devtron/hyperion:22cac3b8-280-34537 +quay.io/devtron/image-scanner:f21e02cb-141-34534 quay.io/devtron/inception:473deaa4-185-21582 quay.io/devtron/k8s-sidecar:1.1.0 quay.io/devtron/k8s-utils:tutum-curl quay.io/devtron/k9s-k8s-utils:latest quay.io/devtron/kubectl:latest -quay.io/devtron/kubelink:a4fc9044-564-33855 -quay.io/devtron/kubewatch:a4fc9044-419-33852 -quay.io/devtron/lens:a4fc9044-333-33874 +quay.io/devtron/kubelink:f21e02cb-564-34528 +quay.io/devtron/kubewatch:f21e02cb-419-34527 +quay.io/devtron/lens:f21e02cb-333-34531 quay.io/devtron/migrator:v4.16.2 quay.io/devtron/minideb:latest quay.io/devtron/minio-mc:RELEASE.2021-02-14T04-28-06Z @@ -34,7 +34,7 @@ quay.io/devtron/minio:RELEASE.2021-02-14T04-01-33Z quay.io/devtron/nats-box quay.io/devtron/nats-server-config-reloader:0.6.2 quay.io/devtron/nats:2.9.3-alpine -quay.io/devtron/notifier:19d654ff-372-33876 +quay.io/devtron/notifier:fb96112e-372-34533 quay.io/devtron/postgres:14.9 quay.io/devtron/postgres_exporter:v0.10.1 quay.io/devtron/postgres_exporter:v0.4.7 diff --git a/manifests/install/devtron-installer.yaml b/manifests/install/devtron-installer.yaml index 96c19c3857..588ccb20de 100644 --- a/manifests/install/devtron-installer.yaml +++ b/manifests/install/devtron-installer.yaml @@ -4,4 +4,4 @@ metadata: name: installer-devtron namespace: devtroncd spec: - url: https://raw.githubusercontent.com/devtron-labs/devtron/v1.7.0/manifests/installation-script + url: https://raw.githubusercontent.com/devtron-labs/devtron/v1.8.0/manifests/installation-script diff --git a/manifests/installation-script b/manifests/installation-script index 547c3c0adf..b9571e0df9 100644 --- a/manifests/installation-script +++ b/manifests/installation-script @@ -1,4 +1,4 @@ -LTAG="v1.7.0"; +LTAG="v1.8.0"; REPO_RAW_URL="https://raw.githubusercontent.com/devtron-labs/devtron/"; shebang = `#!/bin/bash `; diff --git a/releasenotes.md b/releasenotes.md index db70d037ab..1a6ec8f889 100644 --- a/releasenotes.md +++ b/releasenotes.md @@ -1,33 +1,22 @@ -## v1.7.0 +## v1.8.0 ## Enhancements -- feat: Added Cronjob chart 1-6-0 (#6650) -- feat: wf logs (#6606) -- feat: Enable selection of all CI pipelines at once when the Environment filter is applied in Notifications (#6526) +- feat: added the plugin for AWS ECR retagging (#6695) +- feat: Flux CD deployment (#6660) +- feat: add app name in labels list API (#6688) +- feat: audit CI, pre-CD, and post-CD triggers so that retrigger can happen from the last failed config snapshot (#6659) ## Bugs -- fix: app workflow cd pipleine check (#6658) -- fix: panic fixes on concurrent delete request (#6657) -- fix: panic fix on concurrent deletion request (#6644) -- fix: duplicate entries in deployment history without override (#6637) -- fix: overriden pipeline ids filtering in case of material deletion (#6636) -- fix: prevent deletion of git material used in overridden CI templates (#6633) -- fix: ea mode fixes (#6624) -- fix: stack Manager issues (#6619) -- fix: Change ci to webhook fix (#6626) -- fix: oci chart deployment values.yaml and requirement.yaml not compatible (#6620) -- fix: panic fix
installedApp type timeline update (#6614) -- fix: workflow getting incorrectly deleted in case of webhook and unreachable cluster's cd pipeline (#6602) -- fix: add safety checks to prevent index-out-of-range panics in CdHandler (#6597) -- fix: reverted telemetry connection error (#6587) -- fix: anomalies in deployment status timeline (#6569) -- fix: scoped var complex type resolution not working in patch type overrides (#6572) +- fix: Argo sync (#6718) +- fix: cluster delete (#6706) +- fix: Notifier v1 removed (#6705) +- fix: app clone panic (#6696) +- fix: SPDY migration to WebSocket (#6682) +- fix: scanning optimisation (#6683) +- fix: panic in logs API (#6684) +- fix: empty migration seq (#6673) ## Others -- chore: when a cluster event occurs, create config map instead of secret (#6607) -- chore: Gpu workload chart (#6608) -- misc: update sample dockerfiles use non-root user (UID 2002) and base images (#6512) -- misc: wire in EA (#6616) -- chore: removed multi-arch section from readme (#6613) -- chore: git sensor grpc lb policy change (#6610) -- misc: go routines wrapped into panic safe function (#6589) -- chore: http transport service refactoring (#6592) -- misc: GetConfigDBObj in tx (#6584) +- chore: added SQL file for 4.21 (#6716) +- misc: added support for service extraSpec (#6702) +- chore: when the output dir path is /devtroncd in any pipeline stage step, the CI runner gets stuck in a recursive self-copy loop (#6686) + +
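A note for reviewers on the `argoproj.io/Rollout` health check rewrite in `charts/devtron/templates/argocd-secret.yaml`: the Lua script is reorganised into named helpers (`getNumberValueOrDefault`, `checkPaused`, `checkReplicasStatus`, `statusfromcondition`, `statusfrompodhash`) plus a phase-based dispatcher. The sketch below condenses that dispatcher for readability: it merges the identical `Progressing` and `Healthy` branches and summarises the helpers in comments, so treat it as an illustration of the control flow in this diff rather than the verbatim template content.

```lua
-- Condensed sketch of the new health.lua "Main Code" for argoproj.io/Rollout
-- (paraphrased from this diff; helper behaviour summarised below):
--   checkPaused(obj)          -> "Suspended" when the rollout is paused
--   checkReplicasStatus(obj)  -> "Progressing" while replicas are still rolling out
--   statusfromcondition(obj)  -> "Degraded" on InvalidSpec / RolloutAborted / ProgressDeadlineExceeded
--   statusfrompodhash(obj)    -> per-strategy (blueGreen / recreate / canary) health from currentPodHash

hs = {}
if obj.status.phase ~= nil then
  if obj.status.phase == "Paused" then
    hs.status = "Progressing"
    hs.message = "Rollout is paused"
  elseif obj.status.phase == "Progressing" or obj.status.phase == "Healthy" then
    -- refine the reported phase using conditions, then the strategy-specific checks
    hs = statusfromcondition(obj) or hs
    hs = statusfrompodhash(obj) or hs
  else
    -- propagate any other phase (e.g. Degraded) and its message as-is
    hs.status = obj.status.phase
    hs.message = obj.status.message
  end
else
  -- objects that do not report status.phase fall back to conditions and currentPodHash
  if obj.status ~= nil then
    if obj.status.conditions ~= nil then
      hs = statusfromcondition(obj)
    end
    if obj.status.currentPodHash ~= nil then
      hs = statusfrompodhash(obj)
    end
  end
end
return hs
```

Routing on `obj.status.phase` first, while keeping the conditions/`currentPodHash` path as a fallback, appears intended to let the same health check handle Rollout objects both with and without a reported phase, which matters alongside the Argo CD bump from v2.5.2 to v2.13.3 in this release.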